AbstractTetrahedralMeshWriter.cpp

/*

Copyright (C) University of Oxford, 2005-2011

University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.

This file is part of Chaste.

Chaste is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 2.1 of the License, or
(at your option) any later version.

Chaste is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details. The offer of Chaste under the terms of the
License is subject to the License being interpreted in accordance with
English Law and subject to any action against the University of Oxford
being under the jurisdiction of the English Courts.

You should have received a copy of the GNU Lesser General Public License
along with Chaste. If not, see <http://www.gnu.org/licenses/>.

*/

// Disable PETSc logging of MPI calls (we don't use this, anyway) to fix
// "right-hand operand of comma has no effect" warnings when building with
// PETSc 2.2.1.
#define PETSC_HAVE_BROKEN_RECURSIVE_MACRO

#include "AbstractTetrahedralMeshWriter.hpp"
#include "AbstractTetrahedralMesh.hpp"
#include "DistributedTetrahedralMesh.hpp"
#include "MixedDimensionMesh.hpp"
#include "Version.hpp"
#include "Exception.hpp"

#include <mpi.h> // For MPI_Send, MPI_Recv

// Standard library headers used directly below (they may also arrive transitively)
#include <algorithm> // For std::sort
#include <climits>   // For UINT_MAX
#include <cstdio>    // For remove
#include <fstream>   // For std::ifstream

/** Helper struct which bundles together the mesh iterators used while writing from a mesh in memory. */
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
struct MeshWriterIterators
{
    /** Iterator over the mesh's nodes */
    typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator* pNodeIter;
    /** Iterator over the mesh's elements */
    typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::ElementIterator* pElemIter;
    /** Iterator over the mesh's boundary elements */
    typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::BoundaryElementIterator* pBoundaryElemIter;
};

///////////////////////////////////////////////////////////////////////////////////
// Implementation
///////////////////////////////////////////////////////////////////////////////////

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::AbstractTetrahedralMeshWriter(const std::string &rDirectory,
                   const std::string &rBaseName,
                   const bool clearOutputDir)
    : AbstractMeshWriter<ELEMENT_DIM, SPACE_DIM>(rDirectory, rBaseName, clearOutputDir),
      mpMesh(NULL),
      mpNodeMap(NULL),
      mNodesPerElement(ELEMENT_DIM+1),
      mNodesPerBoundaryElement(ELEMENT_DIM),
      mpDistributedMesh(NULL),
      mpMixedMesh(NULL),
      mpIters(new MeshWriterIterators<ELEMENT_DIM,SPACE_DIM>),
      mNodeCounterForParallelMesh(0),
      mElementCounterForParallelMesh(0),
      mBoundaryElementCounterForParallelMesh(0),
      mCableElementCounterForParallelMesh(0),
      mFilesAreBinary(false)
{
    mpIters->pNodeIter = NULL;
    mpIters->pElemIter = NULL;
    mpIters->pBoundaryElemIter = NULL;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::~AbstractTetrahedralMeshWriter()
{
    if (mpIters->pNodeIter)
    {
        delete mpIters->pNodeIter;
    }
    if (mpIters->pElemIter)
    {
        delete mpIters->pElemIter;
    }
    if (mpIters->pBoundaryElemIter)
    {
        delete mpIters->pBoundaryElemIter;
    }

    delete mpIters;

    if (mpNodeMap)
    {
        delete mpNodeMap;
    }
}

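/*
 * Note that the GetNext*() methods below are only ever called on the master process
 * (hence the asserts).  When writing a DistributedTetrahedralMesh, data owned by other
 * processes reaches the master through the MPI_Recv calls here, which are paired with
 * the MPI_Send calls in WriteFilesUsingParallelMesh().
 */
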
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
std::vector<double> AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::GetNextNode()
{
    // If we are writing from a mesh...
    assert(PetscTools::AmMaster());
    if (mpMesh)
    {
        std::vector<double> coords(SPACE_DIM);
        double raw_coords[SPACE_DIM];

        // Iterate over the locally-owned nodes
        if ( (*(mpIters->pNodeIter)) != mpMesh->GetNodeIteratorEnd())
        {
            // Either this is a sequential mesh (and we own it all)
            // or it's parallel (and the master owns the first chunk)
            for (unsigned j=0; j<SPACE_DIM; j++)
            {
                coords[j] = (*(mpIters->pNodeIter))->GetPoint()[j];
            }

            mNodeCounterForParallelMesh = (*(mpIters->pNodeIter))->GetIndex() + 1; // Ready for when we run out of local nodes

            ++(*(mpIters->pNodeIter));
            return coords;
        }

        // If we didn't return then the iterator has reached the end of the local nodes.
        // It must be a parallel mesh and we are expecting messages...

        assert( mpDistributedMesh != NULL );

        MPI_Status status;
        // Do the receive and convert to a std::vector on the master
        MPI_Recv(raw_coords, SPACE_DIM, MPI_DOUBLE, MPI_ANY_SOURCE, mNodeCounterForParallelMesh, PETSC_COMM_WORLD, &status);
        assert(status.MPI_ERROR == MPI_SUCCESS);
        for (unsigned j=0; j<coords.size(); j++)
        {
            coords[j] = raw_coords[j];
        }

        mNodeCounterForParallelMesh++;
        return coords;
    }
    else
    {
        return AbstractMeshWriter<ELEMENT_DIM,SPACE_DIM>::GetNextNode();
    }
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
ElementData AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::GetNextElement()
{
    assert(PetscTools::AmMaster());
    // If we are writing from a mesh...
    if (mpMesh)
    {
        ElementData elem_data;
        elem_data.NodeIndices.resize(mNodesPerElement);

        if ( mpDistributedMesh == NULL ) // not using parallel mesh
        {
            // Use the iterator
            assert(this->mNumElements==mpMesh->GetNumElements());

            for (unsigned j=0; j<elem_data.NodeIndices.size(); j++)
            {
                unsigned old_index = (*(mpIters->pElemIter))->GetNodeGlobalIndex(j);
                elem_data.NodeIndices[j] = mpMesh->IsMeshChanging() ? mpNodeMap->GetNewIndex(old_index) : old_index;
            }
            // Set attribute
            elem_data.AttributeValue = (*(mpIters->pElemIter))->GetRegion();

            ++(*(mpIters->pElemIter));

            return elem_data;
        }
        else // Parallel mesh
        {
            // Use the mElementCounterForParallelMesh variable to identify the next element
            if ( mpDistributedMesh->CalculateDesignatedOwnershipOfElement( mElementCounterForParallelMesh ) == true )
            {
                // Master owns this element
                Element<ELEMENT_DIM, SPACE_DIM>* p_element = mpDistributedMesh->GetElement(mElementCounterForParallelMesh);
                assert(elem_data.NodeIndices.size() == mNodesPerElement);
                assert( ! p_element->IsDeleted() );
                // Master can use the local data to recover node indices & attribute
                for (unsigned j=0; j<mNodesPerElement; j++)
                {
                    elem_data.NodeIndices[j] = p_element->GetNodeGlobalIndex(j);
                }
                elem_data.AttributeValue = p_element->GetRegion();
            }
            else
            {
                // Master doesn't own this element.
                // +1 to allow for the attribute value too
                unsigned raw_indices[mNodesPerElement+1];
                MPI_Status status;
                // Get it from elsewhere
                MPI_Recv(raw_indices, mNodesPerElement+1, MPI_UNSIGNED, MPI_ANY_SOURCE,
                         this->mNumNodes + mElementCounterForParallelMesh,
                         PETSC_COMM_WORLD, &status);
                // Convert to std::vector
                for (unsigned j=0; j<elem_data.NodeIndices.size(); j++)
                {
                    elem_data.NodeIndices[j] = raw_indices[j];
                }

                // Attribute value
                elem_data.AttributeValue = raw_indices[mNodesPerElement];
            }
            // Increment element counter
            mElementCounterForParallelMesh++;

            return elem_data; // Non-master processes will return garbage here - but they should never write to file
        }
    }
    else // not writing from a mesh
    {
        return AbstractMeshWriter<ELEMENT_DIM,SPACE_DIM>::GetNextElement();
    }
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
ElementData AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::GetNextBoundaryElement()
{
    assert(PetscTools::AmMaster());
    // If we are writing from a mesh...
    if (mpMesh)
    {
        ElementData boundary_elem_data;
        boundary_elem_data.NodeIndices.resize(mNodesPerBoundaryElement);

        if ( mpDistributedMesh == NULL ) // not using parallel mesh
        {
            // Use the iterator
            assert(this->mNumBoundaryElements==mpMesh->GetNumBoundaryElements());

            for (unsigned j=0; j<boundary_elem_data.NodeIndices.size(); j++)
            {
                unsigned old_index = (*(*(mpIters->pBoundaryElemIter)))->GetNodeGlobalIndex(j);
                boundary_elem_data.NodeIndices[j] = mpMesh->IsMeshChanging() ? mpNodeMap->GetNewIndex(old_index) : old_index;
            }

            ++(*(mpIters->pBoundaryElemIter));

            return boundary_elem_data;
        }
        else // Parallel mesh
        {
            // Use the mBoundaryElementCounterForParallelMesh variable to identify the next boundary element
            if ( mpDistributedMesh->CalculateDesignatedOwnershipOfBoundaryElement( mBoundaryElementCounterForParallelMesh ) == true )
            {
                // Master owns this boundary element
                BoundaryElement<ELEMENT_DIM-1, SPACE_DIM>* p_boundary_element = mpDistributedMesh->GetBoundaryElement(mBoundaryElementCounterForParallelMesh);
                assert(boundary_elem_data.NodeIndices.size() == ELEMENT_DIM);
                assert( ! p_boundary_element->IsDeleted() );
                // Master can use the local data to recover node indices
                for (unsigned j=0; j<ELEMENT_DIM; j++)
                {
                    boundary_elem_data.NodeIndices[j] = p_boundary_element->GetNodeGlobalIndex(j);
                }
            }
            else
            {
                // Master doesn't own this boundary element.
                unsigned raw_indices[ELEMENT_DIM];
                MPI_Status status;
                // Get it from elsewhere
                MPI_Recv(raw_indices, ELEMENT_DIM, MPI_UNSIGNED, MPI_ANY_SOURCE,
                         this->mNumNodes + this->mNumElements + mBoundaryElementCounterForParallelMesh,
                         PETSC_COMM_WORLD, &status);
                // Convert to std::vector
                for (unsigned j=0; j<boundary_elem_data.NodeIndices.size(); j++)
                {
                    boundary_elem_data.NodeIndices[j] = raw_indices[j];
                }
            }
            // Increment boundary element counter
            mBoundaryElementCounterForParallelMesh++;

            return boundary_elem_data; // Non-master processes will return garbage here - but they should never write to file
        }
    }
    else // not writing from a mesh
    {
        return AbstractMeshWriter<ELEMENT_DIM,SPACE_DIM>::GetNextBoundaryElement();
    }
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
ElementData AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::GetNextCableElement()
{
    assert(PetscTools::AmMaster());

    // If we are writing from a mesh...
    if (mpMesh)
    {
        // Need to be using a MixedDimensionMesh or there will be no cable data
        assert(mpMixedMesh);

        ElementData elem_data;
        elem_data.NodeIndices.resize(2);

        // Use the mCableElementCounterForParallelMesh variable to identify the next cable element
        if ( mpMixedMesh->CalculateDesignatedOwnershipOfCableElement( mCableElementCounterForParallelMesh ) == true )
        {
            // Master owns this cable element
            Element<1, SPACE_DIM>* p_element = mpMixedMesh->GetCableElement(mCableElementCounterForParallelMesh);
            assert( ! p_element->IsDeleted() );
            // Master can use the local data to recover node indices & attribute
            for (unsigned j=0; j<2; j++)
            {
                elem_data.NodeIndices[j] = p_element->GetNodeGlobalIndex(j);
            }
            elem_data.AttributeValue = p_element->GetRegion();
        }
        else
        {
            // Master doesn't own this cable element.
            unsigned raw_indices[2];
            MPI_Status status;
            // Get it from elsewhere
            MPI_Recv(raw_indices, 2, MPI_UNSIGNED, MPI_ANY_SOURCE,
                     this->mNumNodes + this->mNumElements + this->mNumBoundaryElements + mCableElementCounterForParallelMesh,
                     PETSC_COMM_WORLD, &status);

            // Convert to ElementData (2 nodes plus an attribute value)
            for (unsigned j=0; j<2; j++)
            {
                elem_data.NodeIndices[j] = raw_indices[j];
            }
            // Attribute value (the cable radius), sent in a second message with the same tag
            double radius;
            MPI_Recv(&radius, 1, MPI_DOUBLE, MPI_ANY_SOURCE,
                     this->mNumNodes + this->mNumElements + this->mNumBoundaryElements + mCableElementCounterForParallelMesh,
                     PETSC_COMM_WORLD, &status);
            elem_data.AttributeValue = radius;
        }
        // Increment cable element counter
        mCableElementCounterForParallelMesh++;

        return elem_data; // Non-master processes will return garbage here - but they should never write to file
    }
    else // not writing from a mesh
    {
        return AbstractMeshWriter<ELEMENT_DIM,SPACE_DIM>::GetNextCableElement();
    }
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteNclFile(
        AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>& rMesh,
        bool invertMeshPermutation)
{
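    /*
     * Layout of the .ncl (node connectivity list) file written below:
     *   - an ASCII header line: <number of nodes> <maximum containing elements per node> BIN
     *   - for each node, a fixed-width binary record of that maximum number of unsigneds,
     *     holding the sorted indices of the elements containing the node, padded with UINT_MAX
     *   - an ASCII provenance footer starting with '#'
     * If invertMeshPermutation is set and the mesh has been permuted, the data are first
     * written to a "-tmp" file in permuted order and then rewritten in original node order.
     */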
    unsigned max_elements_all;
    if (PetscTools::IsSequential())
    {
        max_elements_all = rMesh.CalculateMaximumContainingElementsPerProcess();
    }
    else
    {
        unsigned max_elements_per_process = rMesh.CalculateMaximumContainingElementsPerProcess();
        MPI_Allreduce(&max_elements_per_process, &max_elements_all, 1, MPI_UNSIGNED, MPI_MAX, PETSC_COMM_WORLD);
    }

    std::string node_connect_list_file_name = this->mBaseName + ".ncl";
    if (invertMeshPermutation && !rMesh.rGetNodePermutation().empty())
    {
        node_connect_list_file_name += "-tmp";
    }

    PetscTools::BeginRoundRobin();
    {
        out_stream p_ncl_file = out_stream(NULL);

        if (PetscTools::AmMaster())
        {
            // Open the file for the first time
            p_ncl_file = this->mpOutputFileHandler->OpenOutputFile(node_connect_list_file_name);

            // Write the ncl header
            *p_ncl_file << rMesh.GetNumNodes() << "\t";
            *p_ncl_file << max_elements_all << "\t";
            *p_ncl_file << "\tBIN\n";
        }
        else
        {
            // Append to the existing file
            p_ncl_file = this->mpOutputFileHandler->OpenOutputFile(node_connect_list_file_name, std::ios::app);
        }

        // Write each node's data
        unsigned default_marker = UINT_MAX;

        typedef typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator NodeIterType;
        for (NodeIterType iter = rMesh.GetNodeIteratorBegin();
             iter != rMesh.GetNodeIteratorEnd();
             ++iter)
        {
            // Get the containing element indices from the node's set and sort them
            std::set<unsigned>& r_elem_set = iter->rGetContainingElementIndices();
            std::vector<unsigned> elem_vector(r_elem_set.begin(), r_elem_set.end());
            std::sort(elem_vector.begin(), elem_vector.end());
            // Pad the vector with unsigned markers
            for (unsigned elem_index=elem_vector.size(); elem_index<max_elements_all; elem_index++)
            {
                elem_vector.push_back(default_marker);
            }
            assert(elem_vector.size() == max_elements_all);
            // Write raw data out of the std::vector into the file
            p_ncl_file->write((char*)&elem_vector[0], elem_vector.size()*sizeof(unsigned));
        }

        if (PetscTools::AmTopMost())
        {
            *p_ncl_file << "#\n# " + ChasteBuildInfo::GetProvenanceString();
        }

        p_ncl_file->close();
    }
    PetscTools::EndRoundRobin();

    if (invertMeshPermutation && PetscTools::AmMaster() && !rMesh.rGetNodePermutation().empty())
    {
        // Open files
        const std::string real_node_connect_list_file_name = this->mBaseName + ".ncl";
        out_stream p_ncl_file = this->mpOutputFileHandler->OpenOutputFile(real_node_connect_list_file_name);
        FileFinder temp_ncl_path = this->mpOutputFileHandler->FindFile(node_connect_list_file_name);
        std::ifstream temp_ncl_file(temp_ncl_path.GetAbsolutePath().c_str());
        // Copy the header
        std::string header_line;
        getline(temp_ncl_file, header_line);
        (*p_ncl_file) << header_line << "\n";
        const std::streampos data_start = temp_ncl_file.tellg();
        const std::streamoff item_width = max_elements_all * sizeof(unsigned);
        // Copy the binary data, permuted back into original node order
        std::vector<unsigned> elem_vector(max_elements_all);
        for (unsigned node_index=0; node_index<rMesh.GetNumAllNodes(); node_index++)
        {
            unsigned permuted_index = rMesh.rGetNodePermutation()[node_index];
            temp_ncl_file.seekg(data_start + item_width * permuted_index, std::ios_base::beg);
            temp_ncl_file.read((char*)&elem_vector[0], max_elements_all*sizeof(unsigned));
            p_ncl_file->write((char*)&elem_vector[0], max_elements_all*sizeof(unsigned));
        }
        // Footer
        *p_ncl_file << "#\n# " + ChasteBuildInfo::GetProvenanceString();
        p_ncl_file->close();
        // Remove the temporary file
        remove(temp_ncl_path.GetAbsolutePath().c_str());
    }
    PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteNclFile");
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesUsingMesh(
      AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>& rMesh,
      bool keepOriginalElementIndexing)
{
    this->mpMeshReader = NULL;
    mpMesh = &rMesh;

    this->mNumNodes = mpMesh->GetNumNodes();
    this->mNumElements = mpMesh->GetNumElements();
    this->mNumBoundaryElements = mpMesh->GetNumBoundaryElements();
    this->mNumCableElements = mpMesh->GetNumCableElements();

    typedef typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator NodeIterType;
    mpIters->pNodeIter = new NodeIterType(mpMesh->GetNodeIteratorBegin());

    typedef typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::ElementIterator ElemIterType;
    mpIters->pElemIter = new ElemIterType(mpMesh->GetElementIteratorBegin());

    typedef typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::BoundaryElementIterator BoundaryElemIterType;
    mpIters->pBoundaryElemIter = new BoundaryElemIterType(mpMesh->GetBoundaryElementIteratorBegin());

    // Use this process's first element to gauge the size of all the elements
    if ( (*(mpIters->pElemIter)) != mpMesh->GetElementIteratorEnd())
    {
        mNodesPerElement = (*(mpIters->pElemIter))->GetNumNodes();
    }

    // Use this process's first boundary element to gauge the size of all the boundary elements
    if ( (*(mpIters->pBoundaryElemIter)) != mpMesh->GetBoundaryElementIteratorEnd())
    {
        mNodesPerBoundaryElement = (*(*(mpIters->pBoundaryElemIter)))->GetNumNodes();
    }

    // The connectivity file is written when we are writing to a binary file (only available
    // for TrianglesMeshWriter) and when we are preserving the element order
    if (this->mFilesAreBinary && keepOriginalElementIndexing)
    {
        WriteNclFile(*mpMesh);
    }

    // Have we got a parallel mesh?
    mpDistributedMesh = dynamic_cast<DistributedTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>* >(&rMesh);

    // Have we got a MixedDimensionMesh?
    mpMixedMesh = dynamic_cast<MixedDimensionMesh<ELEMENT_DIM,SPACE_DIM>* >(this->mpMesh);

    if (mpDistributedMesh != NULL)
    {
        // It's a parallel mesh
        WriteFilesUsingParallelMesh(keepOriginalElementIndexing);
        return;
    }

    if (!PetscTools::AmMaster())
    {
        PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteFilesUsingMesh"); // Paired with the master process writing files
        return;
    }

    // Set up a node map if we might have deleted nodes
    unsigned node_map_index = 0;
    if (mpMesh->IsMeshChanging())
    {
        mpNodeMap = new NodeMap(mpMesh->GetNumAllNodes());
        for (NodeIterType it = mpMesh->GetNodeIteratorBegin(); it != mpMesh->GetNodeIteratorEnd(); ++it)
        {
            mpNodeMap->SetNewIndex(it->GetIndex(), node_map_index++);
        }
    }

    this->WriteFiles();
    PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteFilesUsingMesh"); // Paired with waiting slave processes
    delete mpIters->pNodeIter;
    mpIters->pNodeIter = NULL;
    delete mpIters->pElemIter;
    mpIters->pElemIter = NULL;
    delete mpIters->pBoundaryElemIter;
    mpIters->pBoundaryElemIter = NULL;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesUsingMeshReaderAndMesh(
        AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
        AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>& rMesh)
{
    WriteNclFile(rMesh, true);
    WriteFilesUsingMeshReader(rMeshReader);
}

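/*
 * Tags used for the messages sent from slave processes to the master while writing a
 * parallel mesh (matching the MPI_Recv calls in the GetNext*() methods above):
 *   nodes:             tag = global node index
 *   elements:          tag = mNumNodes + element index
 *   boundary elements: tag = mNumNodes + mNumElements + boundary element index
 *   cable elements:    tag = mNumNodes + mNumElements + mNumBoundaryElements + cable element index
 */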
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesUsingParallelMesh(bool keepOriginalElementIndexing)
{
    if (keepOriginalElementIndexing)
    {
        // Master goes on to write as usual
        if (PetscTools::AmMaster())
        {
            this->WriteFiles();
        }
        else
        {
            double raw_coords[SPACE_DIM];
            // Slaves send their nodes to the master
            typedef typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator NodeIterType;
            for (NodeIterType it = mpMesh->GetNodeIteratorBegin(); it != mpMesh->GetNodeIteratorEnd(); ++it)
            {
                for (unsigned j=0; j<SPACE_DIM; j++)
                {
                    raw_coords[j] = it->GetPoint()[j];
                }
                MPI_Send(raw_coords, SPACE_DIM, MPI_DOUBLE, 0, it->GetIndex(), PETSC_COMM_WORLD); // Nodes sent with positive tags
            }

            // Slaves send the elements for which they are the designated owner
            // +1 allows for the attribute value
            unsigned raw_indices[mNodesPerElement+1]; // Assuming that we don't have parallel quadratic elements
            typedef typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::ElementIterator ElementIterType;
            for (ElementIterType it = mpMesh->GetElementIteratorBegin(); it != mpMesh->GetElementIteratorEnd(); ++it)
            {
                unsigned index = it->GetIndex();
                if ( mpDistributedMesh->CalculateDesignatedOwnershipOfElement( index ) == true )
                {
                    for (unsigned j=0; j<mNodesPerElement; j++)
                    {
                        raw_indices[j] = it->GetNodeGlobalIndex(j);
                    }
                    // Attribute value
                    raw_indices[mNodesPerElement] = it->GetRegion();

                    MPI_Send(raw_indices, mNodesPerElement+1, MPI_UNSIGNED, 0,
                             this->mNumNodes + index, // Elements sent with tags offset
                             PETSC_COMM_WORLD);
                }
            }

            // Slaves send the faces for which they are the designated owner (not in 1-d)
            if (ELEMENT_DIM != 1)
            {
                unsigned raw_face_indices[ELEMENT_DIM]; // Assuming that we don't have parallel quadratic meshes
                typedef typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::BoundaryElementIterator BoundaryElementIterType;
                for (BoundaryElementIterType it = mpMesh->GetBoundaryElementIteratorBegin(); it != mpMesh->GetBoundaryElementIteratorEnd(); ++it)
                {
                    unsigned index = (*it)->GetIndex();
                    if ( mpDistributedMesh->CalculateDesignatedOwnershipOfBoundaryElement( index ) == true )
                    {
                        for (unsigned j=0; j<ELEMENT_DIM; j++)
                        {
                            raw_face_indices[j] = (*it)->GetNodeGlobalIndex(j);
                        }
                        MPI_Send(raw_face_indices, ELEMENT_DIM, MPI_UNSIGNED, 0,
                                 this->mNumNodes + this->mNumElements + index, // Faces sent with tags offset even more
                                 PETSC_COMM_WORLD);
                    }
                }
            }

            // Slaves send the cable elements for which they are the designated owner
            if (mpMixedMesh)
            {
                typedef typename MixedDimensionMesh<ELEMENT_DIM,SPACE_DIM>::CableElementIterator CableElementIterType;
                for (CableElementIterType it = mpMixedMesh->GetCableElementIteratorBegin(); it != mpMixedMesh->GetCableElementIteratorEnd(); ++it)
                {
                    unsigned index = (*it)->GetIndex();
                    if ( mpMixedMesh->CalculateDesignatedOwnershipOfCableElement( index ) == true )
                    {
                        unsigned raw_cable_element_indices[2];
                        for (unsigned j=0; j<2; j++)
                        {
                            raw_cable_element_indices[j] = (*it)->GetNodeGlobalIndex(j);
                        }
                        MPI_Send(raw_cable_element_indices, 2, MPI_UNSIGNED, 0,
                                 this->mNumNodes + this->mNumElements + this->mNumBoundaryElements + index, // Cable elements sent with tags offset even more
                                 PETSC_COMM_WORLD);
                        double cable_radius = (*it)->GetRegion();
                        // Assume this message doesn't overtake the previous one
                        MPI_Send(&cable_radius, 1, MPI_DOUBLE, 0,
                                 this->mNumNodes + this->mNumElements + this->mNumBoundaryElements + index, // Cable elements sent with tags offset even more
                                 PETSC_COMM_WORLD);
                    }
                }
            }
        }
        PetscTools::Barrier("AbstractTetrahedralMeshWriter::WriteFilesUsingParallelMesh");
    }
    else
    {
        PetscTools::BeginRoundRobin();

        if (PetscTools::AmMaster())
        {
            // Make sure headers are written first
            assert(PetscTools::GetMyRank() == 0);
            CreateFilesWithHeaders();
        }

        AppendLocalDataToFiles();

        if (PetscTools::AmTopMost())
        {
            // Make sure footers are written last
            assert(PetscTools::GetMyRank() == PetscTools::GetNumProcs()-1);
            WriteFilesFooter();
        }

        PetscTools::EndRoundRobin();
    }
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::CreateFilesWithHeaders()
{
    // If control reaches this line you haven't implemented the optimised
    // parallel write for whichever visualiser you are writing for.
    NEVER_REACHED;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::AppendLocalDataToFiles()
{
    // If control reaches this line you haven't implemented the optimised
    // parallel write for whichever visualiser you are writing for.
    NEVER_REACHED;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesFooter()
{
    // If control reaches this line you haven't implemented the optimised
    // parallel write for whichever visualiser you are writing for.
    NEVER_REACHED;
}

///////////////////////////////////////////////////////////////////////////////////
// Explicit instantiation
///////////////////////////////////////////////////////////////////////////////////

template class AbstractTetrahedralMeshWriter<1,1>;
template class AbstractTetrahedralMeshWriter<1,2>;
template class AbstractTetrahedralMeshWriter<1,3>;
template class AbstractTetrahedralMeshWriter<2,2>;
template class AbstractTetrahedralMeshWriter<2,3>;
template class AbstractTetrahedralMeshWriter<3,3>;
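
/*
 * Illustrative usage sketch (not part of the original file): writing a mesh that is
 * already in memory with a concrete subclass such as TrianglesMeshWriter.  The class
 * names follow typical Chaste usage; the input path and output directory below are
 * placeholders rather than real files.
 *
 *     TrianglesMeshReader<3,3> mesh_reader("path/to/input_mesh");
 *     TetrahedralMesh<3,3> mesh;
 *     mesh.ConstructFromMeshReader(mesh_reader);
 *
 *     TrianglesMeshWriter<3,3> mesh_writer("OutputDirectory", "written_mesh", true);
 *     mesh_writer.WriteFilesUsingMesh(mesh);
 */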