Chaste Release 3.1
AbstractTetrahedralMeshWriter.cpp
/*

Copyright (c) 2005-2012, University of Oxford.
All rights reserved.

University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.

This file is part of Chaste.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
 * Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.
 * Neither the name of the University of Oxford nor the names of its
   contributors may be used to endorse or promote products derived from this
   software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*/

// Disable PETSc logging of MPI calls (we don't use this, anyway) to fix
// "right-hand operand of comma has no effect" warnings when building with
// PETSc 2.2.1.
#define PETSC_HAVE_BROKEN_RECURSIVE_MACRO

#include "AbstractTetrahedralMeshWriter.hpp"
#include "AbstractTetrahedralMesh.hpp"
#include "DistributedTetrahedralMesh.hpp"
#include "MixedDimensionMesh.hpp"
#include "Version.hpp"
#include "Exception.hpp"

#include <mpi.h> // For MPI_Send, MPI_Recv

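// Helper struct bundling the iterators that the writer uses to keep its place
// in the mesh between successive calls to GetNextNode(), GetNextElement() and
// GetNextBoundaryElement().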
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
struct MeshWriterIterators
{
    typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator* pNodeIter;
    typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::ElementIterator* pElemIter;
    typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::BoundaryElementIterator* pBoundaryElemIter;
};

// Implementation


template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::AbstractTetrahedralMeshWriter(const std::string &rDirectory,
                   const std::string &rBaseName,
                   const bool clearOutputDir)
    : AbstractMeshWriter<ELEMENT_DIM, SPACE_DIM>(rDirectory, rBaseName, clearOutputDir),
      mpMesh(NULL),
      mpNodeMap(NULL),
      mNodesPerElement(ELEMENT_DIM+1),
      mNodesPerBoundaryElement(ELEMENT_DIM),
      mpDistributedMesh(NULL),
      mpMixedMesh(NULL),
      mpIters(new MeshWriterIterators<ELEMENT_DIM,SPACE_DIM>),
      mNodeCounterForParallelMesh(0),
      mElementCounterForParallelMesh(0),
      mBoundaryElementCounterForParallelMesh(0),
      mCableElementCounterForParallelMesh(0),
      mFilesAreBinary(false)
{
    mpIters->pNodeIter = NULL;
    mpIters->pElemIter = NULL;
    mpIters->pBoundaryElemIter = NULL;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::~AbstractTetrahedralMeshWriter()
{
    if (mpIters->pNodeIter)
    {
        delete mpIters->pNodeIter;
    }
    if (mpIters->pElemIter)
    {
        delete mpIters->pElemIter;
    }
    if (mpIters->pBoundaryElemIter)
    {
        delete mpIters->pBoundaryElemIter;
    }

    delete mpIters;

    if (mpNodeMap)
    {
        delete mpNodeMap;
    }
}

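// Parallel writing protocol: only the master process actually writes to file.
// The GetNext*() methods below first exhaust the master's locally-owned data and
// then block on MPI_Recv for data posted by the other processes in
// WriteFilesUsingParallelMesh().  Messages are matched by MPI tag: the global
// node index for nodes, mNumNodes + index for elements,
// mNumNodes + mNumElements + index for boundary elements, and
// mNumNodes + mNumElements + mNumBoundaryElements + index for cable elements.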
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
std::vector<double> AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::GetNextNode()
{
    // If we are writing from a mesh...
    assert(PetscTools::AmMaster());
    if (mpMesh)
    {
        std::vector<double> coords(SPACE_DIM);
        double raw_coords[SPACE_DIM];

        //Iterate over the locally-owned nodes
        if ( (*(mpIters->pNodeIter)) != mpMesh->GetNodeIteratorEnd())
        {
            // Either this is a sequential mesh (and we own it all)
            // or it's parallel (and the master owns the first chunk)
            for (unsigned j=0; j<SPACE_DIM; j++)
            {
                coords[j] = (*(mpIters->pNodeIter))->GetPoint()[j];
            }

            mNodeCounterForParallelMesh=(*(mpIters->pNodeIter))->GetIndex() + 1;//Ready for when we run out of local nodes

            ++(*(mpIters->pNodeIter));
            return coords;
        }

        // If we didn't return then the iterator has reached the end of the local nodes.
        // It must be a parallel mesh and we are expecting messages...

        assert( mpDistributedMesh != NULL );

        MPI_Status status;
        status.MPI_ERROR = MPI_SUCCESS; //For MPICH2
        // do receive, convert to std::vector on master
        MPI_Recv(raw_coords, SPACE_DIM, MPI_DOUBLE, MPI_ANY_SOURCE, mNodeCounterForParallelMesh, PETSC_COMM_WORLD, &status);
        assert(status.MPI_ERROR == MPI_SUCCESS);
        for (unsigned j=0; j<coords.size(); j++)
        {
            coords[j] = raw_coords[j];
        }

        mNodeCounterForParallelMesh++;
        return coords;
    }
    else
    {
        return AbstractMeshWriter<ELEMENT_DIM,SPACE_DIM>::GetNextNode();
    }
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
ElementData AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::GetNextElement()
{
    assert(PetscTools::AmMaster());
    // If we are writing from a mesh...
    if (mpMesh)
    {
        ElementData elem_data;
        elem_data.NodeIndices.resize(mNodesPerElement);

        if ( mpDistributedMesh == NULL ) // not using parallel mesh
        {
            // Use the iterator
            assert(this->mNumElements==mpMesh->GetNumElements());

            for (unsigned j=0; j<elem_data.NodeIndices.size(); j++)
            {
                unsigned old_index = (*(mpIters->pElemIter))->GetNodeGlobalIndex(j);
                elem_data.NodeIndices[j] = mpMesh->IsMeshChanging() ? mpNodeMap->GetNewIndex(old_index) : old_index;
            }
            // Set attribute
            elem_data.AttributeValue = (*(mpIters->pElemIter))->GetAttribute();

            ++(*(mpIters->pElemIter));

            return elem_data;
        }
        else //Parallel mesh
        {
            //Use the mElementCounterForParallelMesh variable to identify next element
            if ( mpDistributedMesh->CalculateDesignatedOwnershipOfElement( mElementCounterForParallelMesh ) == true )
            {
                //Master owns this element
                Element<ELEMENT_DIM, SPACE_DIM>* p_element = mpDistributedMesh->GetElement(mElementCounterForParallelMesh);
                assert(elem_data.NodeIndices.size() == mNodesPerElement);
                assert( ! p_element->IsDeleted() );
                //Master can use the local data to recover node indices & attribute
                for (unsigned j=0; j<mNodesPerElement; j++)
                {
                    elem_data.NodeIndices[j] = p_element->GetNodeGlobalIndex(j);
                }
                elem_data.AttributeValue = p_element->GetAttribute();
            }
            else
            {
                //Master doesn't own this element.
                // +1 to allow for attribute value too
                unsigned raw_indices[mNodesPerElement+1];
                MPI_Status status;
                //Get it from elsewhere
                MPI_Recv(raw_indices, mNodesPerElement+1, MPI_UNSIGNED, MPI_ANY_SOURCE,
                         this->mNumNodes + mElementCounterForParallelMesh,
                         PETSC_COMM_WORLD, &status);
                // Convert to std::vector
                for (unsigned j=0; j< elem_data.NodeIndices.size(); j++)
                {
                    elem_data.NodeIndices[j] = raw_indices[j];
                }

                // Attribute value
                elem_data.AttributeValue = raw_indices[mNodesPerElement];
            }
            // increment element counter
            mElementCounterForParallelMesh++;

            return elem_data; // non-master processors will return garbage here - but they should never write to file
        }
    }
    else // not writing from a mesh
    {
        return AbstractMeshWriter<ELEMENT_DIM,SPACE_DIM>::GetNextElement();
    }
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
ElementData AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::GetNextBoundaryElement()
{
    assert(PetscTools::AmMaster());
    // If we are writing from a mesh...
    if (mpMesh)
    {
        ElementData boundary_elem_data;
        boundary_elem_data.NodeIndices.resize(mNodesPerBoundaryElement);

        if ( mpDistributedMesh == NULL ) // not using parallel mesh
        {
            // Use the iterator
            assert(this->mNumBoundaryElements==mpMesh->GetNumBoundaryElements());

            for (unsigned j=0; j<boundary_elem_data.NodeIndices.size(); j++)
            {
                unsigned old_index = (*(*(mpIters->pBoundaryElemIter)))->GetNodeGlobalIndex(j);
                boundary_elem_data.NodeIndices[j] = mpMesh->IsMeshChanging() ? mpNodeMap->GetNewIndex(old_index) : old_index;
            }

            ++(*(mpIters->pBoundaryElemIter));

            return boundary_elem_data;
        }
        else //Parallel mesh
        {
            // Use the mBoundaryElementCounterForParallelMesh variable to identify the next boundary element
            if ( mpDistributedMesh->CalculateDesignatedOwnershipOfBoundaryElement( mBoundaryElementCounterForParallelMesh ) == true )
            {
                //Master owns this boundary element
                BoundaryElement<ELEMENT_DIM-1, SPACE_DIM>* p_boundary_element = mpDistributedMesh->GetBoundaryElement(mBoundaryElementCounterForParallelMesh);
                assert(boundary_elem_data.NodeIndices.size() == ELEMENT_DIM);
                assert( ! p_boundary_element->IsDeleted() );
                //Master can use the local data to recover node indices & attribute
                for (unsigned j=0; j<ELEMENT_DIM; j++)
                {
                    boundary_elem_data.NodeIndices[j] = p_boundary_element->GetNodeGlobalIndex(j);
                }
            }
            else
            {
                //Master doesn't own this boundary element.
                unsigned raw_indices[ELEMENT_DIM];
                MPI_Status status;
                //Get it from elsewhere
                MPI_Recv(raw_indices, ELEMENT_DIM, MPI_UNSIGNED, MPI_ANY_SOURCE,
                         this->mNumNodes + this->mNumElements + mBoundaryElementCounterForParallelMesh,
                         PETSC_COMM_WORLD, &status);
                // Convert to std::vector
                for (unsigned j=0; j< boundary_elem_data.NodeIndices.size(); j++)
                {
                    boundary_elem_data.NodeIndices[j] = raw_indices[j];
                }
            }
            // increment element counter
            mBoundaryElementCounterForParallelMesh++;

            return boundary_elem_data; // non-master processors will return garbage here - but they should never write to file
        }
    }
    else // not writing from a mesh
    {
        return AbstractMeshWriter<ELEMENT_DIM,SPACE_DIM>::GetNextBoundaryElement();
    }
}


template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
ElementData AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::GetNextCableElement()
{
    assert(PetscTools::AmMaster());

    // If we are writing from a mesh...
    if (mpMesh)
    {
        // Need to be using a MixedDimensionMesh or there will be no cable data
        assert(mpMixedMesh);

        ElementData elem_data;
        elem_data.NodeIndices.resize(2);

        //Use the mCableElementCounterForParallelMesh variable to identify next element
        if ( mpMixedMesh->CalculateDesignatedOwnershipOfCableElement( mCableElementCounterForParallelMesh ) == true )
        {
            //Master owns this element
            Element<1, SPACE_DIM>* p_element = mpMixedMesh->GetCableElement(mCableElementCounterForParallelMesh);
            assert( ! p_element->IsDeleted() );
            //Master can use the local data to recover node indices & attribute
            for (unsigned j=0; j<2; j++)
            {
                elem_data.NodeIndices[j] = p_element->GetNodeGlobalIndex(j);
            }
            elem_data.AttributeValue = p_element->GetAttribute();
        }
        else
        {
            //Master doesn't own this element.
            unsigned raw_indices[2];
            MPI_Status status;
            //Get it from elsewhere
            MPI_Recv(raw_indices, 2, MPI_UNSIGNED, MPI_ANY_SOURCE,
                     this->mNumNodes + this->mNumElements + this->mNumBoundaryElements + mCableElementCounterForParallelMesh,
                     PETSC_COMM_WORLD, &status);

            // Convert to ElementData (2 nodes plus an attribute value)
            for (unsigned j=0; j< 2; j++)
            {
                elem_data.NodeIndices[j] = raw_indices[j];
            }
            // Attribute value
            double radius;
            MPI_Recv(&radius, 1, MPI_DOUBLE, MPI_ANY_SOURCE,
                     this->mNumNodes + this->mNumElements + this->mNumBoundaryElements + mCableElementCounterForParallelMesh,
                     PETSC_COMM_WORLD, &status);
            elem_data.AttributeValue = radius;
        }
        // increment element counter
        mCableElementCounterForParallelMesh++;

        return elem_data; // non-master processors will return garbage here - but they should never write to file
    }
    else // not writing from a mesh
    {
        return AbstractMeshWriter<ELEMENT_DIM,SPACE_DIM>::GetNextCableElement();
    }
}

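// The .ncl ("node connectivity list") file written below records, for every node,
// the indices of the elements containing that node.  Layout: a text header
// "<num_nodes>\t<max_containing_elements>\t\tBIN", then one fixed-width binary
// record per node of max_containing_elements unsigned indices (sorted, padded
// with UINT_MAX), and finally a provenance footer.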
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteNclFile(
        AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>& rMesh,
        bool invertMeshPermutation)
{
    unsigned max_elements_all;
    if (PetscTools::IsSequential())
    {
        max_elements_all = rMesh.CalculateMaximumContainingElementsPerProcess();
    }
    else
    {
        unsigned max_elements_per_process = rMesh.CalculateMaximumContainingElementsPerProcess();
        MPI_Allreduce(&max_elements_per_process, &max_elements_all, 1, MPI_UNSIGNED, MPI_MAX, PETSC_COMM_WORLD);
    }

    std::string node_connect_list_file_name = this->mBaseName + ".ncl";
    if (invertMeshPermutation && !rMesh.rGetNodePermutation().empty())
    {
        node_connect_list_file_name += "-tmp";
    }

    PetscTools::BeginRoundRobin();
    {
        out_stream p_ncl_file=out_stream(NULL);

        if (PetscTools::AmMaster())
        {
            //Open the file for the first time
            p_ncl_file = this->mpOutputFileHandler->OpenOutputFile(node_connect_list_file_name);

            // Write the ncl header
            *p_ncl_file << rMesh.GetNumNodes() << "\t";
            *p_ncl_file << max_elements_all << "\t";
            *p_ncl_file << "\tBIN\n";
        }
        else
        {
            // Append to the existing file
            p_ncl_file = this->mpOutputFileHandler->OpenOutputFile(node_connect_list_file_name, std::ios::app);
        }

        // Write each node's data
        unsigned default_marker = UINT_MAX;

        typedef typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator NodeIterType;
        for (NodeIterType iter = rMesh.GetNodeIteratorBegin();
             iter != rMesh.GetNodeIteratorEnd();
             ++iter)
        {
            // Get the containing element indices from the node's set and sort them
            std::set<unsigned>& r_elem_set = iter->rGetContainingElementIndices();
            std::vector<unsigned> elem_vector(r_elem_set.begin(), r_elem_set.end());
            std::sort(elem_vector.begin(), elem_vector.end());
            // Pad the vector with unsigned markers
            for (unsigned elem_index=elem_vector.size(); elem_index<max_elements_all; elem_index++)
            {
                elem_vector.push_back(default_marker);
            }
            assert (elem_vector.size() == max_elements_all);
            // Write raw data out of std::vector into the file
            p_ncl_file->write((char*)&elem_vector[0], elem_vector.size()*sizeof(unsigned));
        }

        if (PetscTools::AmTopMost())
        {
            *p_ncl_file << "#\n# " + ChasteBuildInfo::GetProvenanceString();
        }

        p_ncl_file->close();
    }
    PetscTools::EndRoundRobin();

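    // If requested, undo the node permutation: the "-tmp" file written above is in
    // the mesh's permuted (as-stored) node order, so its fixed-width records are
    // copied back into the real .ncl file in original node order and the temporary
    // file is then removed.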
    if (invertMeshPermutation && PetscTools::AmMaster() && !rMesh.rGetNodePermutation().empty())
    {
        // Open files
        const std::string real_node_connect_list_file_name = this->mBaseName + ".ncl";
        out_stream p_ncl_file = this->mpOutputFileHandler->OpenOutputFile(real_node_connect_list_file_name);
        FileFinder temp_ncl_path = this->mpOutputFileHandler->FindFile(node_connect_list_file_name);
        std::ifstream temp_ncl_file(temp_ncl_path.GetAbsolutePath().c_str());
        // Copy the header
        std::string header_line;
        getline(temp_ncl_file, header_line);
        (*p_ncl_file) << header_line << "\n";
        const std::streampos data_start = temp_ncl_file.tellg();
        const std::streamoff item_width = max_elements_all * sizeof(unsigned);
        // Copy the binary data, permuted
        std::vector<unsigned> elem_vector(max_elements_all);
        for (unsigned node_index=0; node_index<rMesh.GetNumAllNodes(); node_index++)
        {
            unsigned permuted_index = rMesh.rGetNodePermutation()[node_index];
            temp_ncl_file.seekg(data_start + item_width * permuted_index, std::ios_base::beg);
            temp_ncl_file.read((char*)&elem_vector[0], max_elements_all*sizeof(unsigned));
            p_ncl_file->write((char*)&elem_vector[0], max_elements_all*sizeof(unsigned));
        }
        // Footer
        *p_ncl_file << "#\n# " + ChasteBuildInfo::GetProvenanceString();
        p_ncl_file->close();
        // Remove temp file
        remove(temp_ncl_path.GetAbsolutePath().c_str());
    }
    PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteNclFile");
}

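// Entry point when writing directly from a mesh in memory.  Sets up the node,
// element and boundary-element iterators, writes the .ncl file if the output is
// binary and the original element indexing is kept, and then either hands over to
// WriteFilesUsingParallelMesh() for a DistributedTetrahedralMesh or lets the
// master write everything while the other processes wait at a barrier.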
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesUsingMesh(
      AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>& rMesh,
      bool keepOriginalElementIndexing)
{
    this->mpMeshReader = NULL;
    mpMesh = &rMesh;

    this->mNumNodes = mpMesh->GetNumNodes();
    this->mNumElements = mpMesh->GetNumElements();
    this->mNumBoundaryElements = mpMesh->GetNumBoundaryElements();
    this->mNumCableElements = mpMesh->GetNumCableElements();

    typedef typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator NodeIterType;
    mpIters->pNodeIter = new NodeIterType(mpMesh->GetNodeIteratorBegin());

    typedef typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::ElementIterator ElemIterType;
    mpIters->pElemIter = new ElemIterType(mpMesh->GetElementIteratorBegin());

    typedef typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::BoundaryElementIterator BoundaryElemIterType;
    mpIters->pBoundaryElemIter = new BoundaryElemIterType(mpMesh->GetBoundaryElementIteratorBegin());

    //Use this process's first element to gauge the size of all the elements
    if ( (*(mpIters->pElemIter)) != mpMesh->GetElementIteratorEnd())
    {
        mNodesPerElement = (*(mpIters->pElemIter))->GetNumNodes();
    }

    //Use this process's first boundary element to gauge the size of all the boundary elements
    if ( (*(mpIters->pBoundaryElemIter)) != mpMesh->GetBoundaryElementIteratorEnd())
    {
        mNodesPerBoundaryElement = (*(*(mpIters->pBoundaryElemIter)))->GetNumNodes();
    }

    //Connectivity file is written when we write to a binary file (only available for TrianglesMeshWriter) and if we are preserving the element order
    if (this->mFilesAreBinary && keepOriginalElementIndexing)
    {
        WriteNclFile(*mpMesh);
    }

    // Have we got a parallel mesh?
    mpDistributedMesh = dynamic_cast<DistributedTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>* >(&rMesh);

    // Have we got a MixedDimensionMesh?
    mpMixedMesh = dynamic_cast<MixedDimensionMesh<ELEMENT_DIM,SPACE_DIM>* >(this->mpMesh);

    if (mpDistributedMesh != NULL)
    {
        // It's a parallel mesh
        WriteFilesUsingParallelMesh(keepOriginalElementIndexing);
        return;
    }

    if (!PetscTools::AmMaster())
    {
        PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteFilesUsingMesh"); //Paired with Master process writing files
        return;
    }

    // Set up node map if we might have deleted nodes
    unsigned node_map_index = 0;
    if (mpMesh->IsMeshChanging())
    {
        mpNodeMap = new NodeMap(mpMesh->GetNumAllNodes());
        for (NodeIterType it = mpMesh->GetNodeIteratorBegin(); it != mpMesh->GetNodeIteratorEnd(); ++it)
        {
            mpNodeMap->SetNewIndex(it->GetIndex(), node_map_index++);
        }
    }

    this->WriteFiles();
    PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteFilesUsingMesh"); // Paired with waiting Slave processes
    delete mpIters->pNodeIter;
    mpIters->pNodeIter = NULL;
    delete mpIters->pElemIter;
    mpIters->pElemIter = NULL;
    delete mpIters->pBoundaryElemIter;
    mpIters->pBoundaryElemIter = NULL;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesUsingMeshReaderAndMesh(
        AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
        AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>& rMesh)
{
    WriteNclFile(rMesh, true);
    this->WriteFilesUsingMeshReader(rMeshReader);
}

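// When keepOriginalElementIndexing is true, the non-master processes send the
// master the nodes they own, and the elements, boundary elements and cable
// elements for which they are the designated owners, via MPI_Send with the tag
// scheme described above GetNextNode(); the master then writes the files in
// global index order.  When it is false, every process appends its own local
// data in a round robin, so the concrete writer must implement
// CreateFilesWithHeaders(), AppendLocalDataToFiles() and WriteFilesFooter().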
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesUsingParallelMesh(bool keepOriginalElementIndexing)
{
    if (keepOriginalElementIndexing)
    {
        // Master goes on to write as usual
        if (PetscTools::AmMaster())
        {
            this->WriteFiles();
        }
        else
        {
            double raw_coords[SPACE_DIM];
            // Slaves send their locally-owned nodes to the master
            typedef typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator NodeIterType;
            for (NodeIterType it = mpMesh->GetNodeIteratorBegin(); it != mpMesh->GetNodeIteratorEnd(); ++it)
            {
                for (unsigned j=0; j<SPACE_DIM; j++)
                {
                    raw_coords[j] = it->GetPoint()[j];
                }
                MPI_Send(raw_coords, SPACE_DIM, MPI_DOUBLE, 0, it->GetIndex(), PETSC_COMM_WORLD);//Nodes sent with positive tags
            }

            // Slaves send the elements for which they are the designated owners to the master
            // +1 allows for attribute value
            unsigned raw_indices[mNodesPerElement+1]; // Assuming that we don't have parallel quadratic elements
            typedef typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::ElementIterator ElementIterType;
            for (ElementIterType it = mpMesh->GetElementIteratorBegin(); it != mpMesh->GetElementIteratorEnd(); ++it)
            {
                unsigned index = it->GetIndex();
                if ( mpDistributedMesh->CalculateDesignatedOwnershipOfElement( index ) == true )
                {
                    for (unsigned j=0; j<mNodesPerElement; j++)
                    {
                        raw_indices[j] = it->GetNodeGlobalIndex(j);
                    }
                    // Attribute value
                    raw_indices[mNodesPerElement] = it->GetUnsignedAttribute();

                    MPI_Send(raw_indices, mNodesPerElement+1, MPI_UNSIGNED, 0,
                             this->mNumNodes + index, //Elements sent with tags offset
                             PETSC_COMM_WORLD);
                }
            }

            // Slaves send the boundary elements (faces) for which they are the designated owners to the master (not in 1-d)
            if (ELEMENT_DIM != 1)
            {
                unsigned raw_face_indices[ELEMENT_DIM]; // Assuming that we don't have parallel quadratic meshes
                typedef typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::BoundaryElementIterator BoundaryElementIterType;
                for (BoundaryElementIterType it = mpMesh->GetBoundaryElementIteratorBegin(); it != mpMesh->GetBoundaryElementIteratorEnd(); ++it)
                {
                    unsigned index = (*it)->GetIndex();
                    if ( mpDistributedMesh->CalculateDesignatedOwnershipOfBoundaryElement( index ) == true )
                    {
                        for (unsigned j=0; j<ELEMENT_DIM; j++)
                        {
                            raw_face_indices[j] = (*it)->GetNodeGlobalIndex(j);
                        }
                        MPI_Send(raw_face_indices, ELEMENT_DIM, MPI_UNSIGNED, 0,
                                 this->mNumNodes + this->mNumElements + index, //Faces sent with tags offset even more
                                 PETSC_COMM_WORLD);
                    }
                }
            }

            // Slaves send the cable elements for which they are the designated owners to the master
            if (mpMixedMesh)
            {
                typedef typename MixedDimensionMesh<ELEMENT_DIM,SPACE_DIM>::CableElementIterator CableElementIterType;
                for (CableElementIterType it = mpMixedMesh->GetCableElementIteratorBegin(); it != mpMixedMesh->GetCableElementIteratorEnd(); ++it)
                {
                    unsigned index = (*it)->GetIndex();
                    if ( mpMixedMesh->CalculateDesignatedOwnershipOfCableElement( index ) == true )
                    {
                        unsigned raw_cable_element_indices[2];
                        for (unsigned j=0; j<2; j++)
                        {
                            raw_cable_element_indices[j] = (*it)->GetNodeGlobalIndex(j);
                        }
                        MPI_Send(raw_cable_element_indices, 2, MPI_UNSIGNED, 0,
                                 this->mNumNodes + this->mNumElements + this->mNumBoundaryElements + index, //Cable elements sent with tags offset even more
                                 PETSC_COMM_WORLD);
                        double cable_radius = (*it)->GetAttribute();
                        //Assume this message doesn't overtake previous
                        MPI_Send(&cable_radius, 1, MPI_DOUBLE, 0,
                                 this->mNumNodes + this->mNumElements + this->mNumBoundaryElements + index, //Cable elements sent with tags offset even more
                                 PETSC_COMM_WORLD);
                    }
                }
            }
        }
        PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteFilesUsingParallelMesh");
    }
    else
    {
        PetscTools::BeginRoundRobin();

        if (PetscTools::AmMaster())
        {
            // Make sure headers are written first
            assert(PetscTools::GetMyRank() == 0);
            CreateFilesWithHeaders();
        }

        AppendLocalDataToFiles();

        if (PetscTools::AmTopMost())
        {
            // Make sure footers are written last
            assert(PetscTools::GetMyRank() == PetscTools::GetNumProcs()-1);
            WriteFilesFooter();
        }

        PetscTools::EndRoundRobin();
    }
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::CreateFilesWithHeaders()
{
    // If control reaches this line you haven't implemented the optimised
    // parallel write for whichever visualiser you are writing for.
    NEVER_REACHED;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::AppendLocalDataToFiles()
{
    // If control reaches this line you haven't implemented the optimised
    // parallel write for whichever visualiser you are writing for.
    NEVER_REACHED;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesFooter()
{
    // If control reaches this line you haven't implemented the optimised
    // parallel write for whichever visualiser you are writing for.
    NEVER_REACHED;
}

// Explicit instantiation

template class AbstractTetrahedralMeshWriter<1,1>;
template class AbstractTetrahedralMeshWriter<1,2>;
template class AbstractTetrahedralMeshWriter<1,3>;
template class AbstractTetrahedralMeshWriter<2,2>;
template class AbstractTetrahedralMeshWriter<2,3>;
template class AbstractTetrahedralMeshWriter<3,3>;
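// Illustrative usage (a minimal sketch, not part of the original file; it assumes
// the concrete TrianglesMeshWriter subclass declared elsewhere in Chaste, and a
// hypothetical output directory and base name):
//
//     DistributedTetrahedralMesh<3,3> mesh;
//     mesh.ConstructCuboid(10, 10, 10);                  // or load the mesh from a reader
//     TrianglesMeshWriter<3,3> writer("MeshWriterDemo", "cube");
//     writer.WriteFilesUsingMesh(mesh);                  // works in sequential or parallel runs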