ParallelTetrahedralMesh.hpp

00001 /*
00002 
00003 Copyright (C) University of Oxford, 2005-2009
00004 
00005 University of Oxford means the Chancellor, Masters and Scholars of the
00006 University of Oxford, having an administrative office at Wellington
00007 Square, Oxford OX1 2JD, UK.
00008 
00009 This file is part of Chaste.
00010 
00011 Chaste is free software: you can redistribute it and/or modify it
00012 under the terms of the GNU Lesser General Public License as published
00013 by the Free Software Foundation, either version 2.1 of the License, or
00014 (at your option) any later version.
00015 
00016 Chaste is distributed in the hope that it will be useful, but WITHOUT
00017 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
00018 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
00019 License for more details. The offer of Chaste under the terms of the
00020 License is subject to the License being interpreted in accordance with
00021 English Law and subject to any action against the University of Oxford
00022 being under the jurisdiction of the English Courts.
00023 
00024 You should have received a copy of the GNU Lesser General Public License
00025 along with Chaste. If not, see <http://www.gnu.org/licenses/>.
00026 
00027 */
00028 
00029 #ifndef PARALLELTETRAHEDRALMESH_HPP_
00030 #define PARALLELTETRAHEDRALMESH_HPP_
00031 
00032 #include "AbstractMesh.hpp"
00033 #include "AbstractMeshReader.hpp"
00034 #include "Element.hpp"
00035 #include "BoundaryElement.hpp"
00036 #include "Node.hpp"
00037 #include "DistributedVector.hpp"
00038 #include "PetscTools.hpp"
00039 #include "OutputFileHandler.hpp"
00040 
00041 /*
00042  *  The following definition fixes an odd incompatibility of METIS 4.0 and Chaste. Since 
00043  * the library was compiled with a plain-C compiler, it fails to link using a C++ compiler.
00044  * Note that METIS 4.0 fails to compile with g++ or icpc, so a C compiler should be used.
00045  *  
00046  * Somebody had this problem before: http://www-users.cs.umn.edu/~karypis/.discus/messages/15/113.html?1119486445 
00047  *
00048  * Note that it is necessary to define the function header before the #include statement. 
00049 */ 
00050 extern "C" {
00051 extern void METIS_PartMeshNodal(int*, int*, int*, int*, int*, int*, int*, int*, int*);
00052 };
00053 #include "metis.h"
00054 
00055  
/**
 * A tetrahedral mesh distributed across MPI processes.
 *
 * Each process stores only the nodes, elements and boundary elements it owns,
 * plus "halo" nodes: nodes owned by other processes but needed to build
 * locally-owned elements. Global-to-local index translation is done through
 * the *Mapping member maps.
 */
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
class ParallelTetrahedralMesh : public AbstractMesh< ELEMENT_DIM, SPACE_DIM>
{

    friend class TestParallelTetrahedralMesh;

public:

    /** Strategy used to assign nodes to processes. */
    typedef enum
    {
        DUMB=0,        // contiguous blocks of node indices (PETSc default layout)
        METIS_BINARY,  // run the external "partdmesh" METIS binary
        METIS_LIBRARY  // call METIS_PartMeshNodal through the linked library
    } PartitionType;

private:

    /** Number of elements in the whole mesh, not just those owned locally. */
    unsigned mTotalNumElements;
    /** Number of boundary elements (faces) in the whole mesh. */
    unsigned mTotalNumBoundaryElements;
    /** Number of nodes in the whole mesh. */
    unsigned mTotalNumNodes;

    /** Nodes owned by other processes but used by locally-owned elements. */
    std::vector<Node<SPACE_DIM>* > mHaloNodes;

    /** Global node index -> local position in this->mNodes. */
    std::map<unsigned, unsigned> mNodesMapping;
    /** Global node index -> local position in mHaloNodes. */
    std::map<unsigned, unsigned> mHaloNodesMapping;
    /** Global element index -> local position in this->mElements. */
    std::map<unsigned, unsigned> mElementsMapping;
    /** Global boundary element index -> local position in this->mBoundaryElements. */
    std::map<unsigned, unsigned> mBoundaryElementsMapping;

    /** Partitioning strategy selected at construction time. */
    PartitionType mMetisPartitioning;

public:

    ParallelTetrahedralMesh(PartitionType metisPartitioning=METIS_LIBRARY);

    virtual ~ParallelTetrahedralMesh();

    void ConstructFromMeshReader(AbstractMeshReader<ELEMENT_DIM,SPACE_DIM> &rMeshReader,
                                 bool cullInternalFaces=false);

    /** Number of nodes stored (and owned) on this process; excludes halo nodes. */
    unsigned GetNumLocalNodes() const;
    /** Number of elements stored on this process. */
    unsigned GetNumLocalElements() const;

    /** Number of nodes in the whole mesh. */
    unsigned GetNumNodes() const;
    /** Number of elements in the whole mesh. */
    unsigned GetNumElements() const;
    /** Number of boundary elements (faces) in the whole mesh. */
    unsigned GetNumBoundaryElements() const;

    void SetElementOwnerships(unsigned lo, unsigned hi);

private:

    // Record the local index a global entity will get when appended to the
    // corresponding container.
    void RegisterNode(unsigned index);
    void RegisterHaloNode(unsigned index);
    void RegisterElement(unsigned index);
    void RegisterBoundaryElement(unsigned index);

    // Translate global indices to local container positions; the const
    // variants throw if the entity is not owned by this process.
    unsigned SolveNodeMapping(unsigned index) const;
    unsigned SolveHaloNodeMapping(unsigned index);
    unsigned SolveElementMapping(unsigned index) const;
    unsigned SolveBoundaryElementMapping(unsigned index) const;

    void ComputeMeshPartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM> &rMeshReader,
                               std::set<unsigned>& rNodesOwned,
                               std::set<unsigned>& rHaloNodesOwned,
                               std::set<unsigned>& rElementsOwned,
                               std::vector<unsigned>& rProcessorsOffset,
                               std::vector<unsigned>& rNodePermutation);

    void DumbNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM> &rMeshReader,
                              std::set<unsigned>& rNodesOwned);

    void MetisBinaryNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM> &rMeshReader,
                                     std::set<unsigned>& rNodesOwned, std::vector<unsigned>& rProcessorsOffset,
                                     std::vector<unsigned>& rNodePermutation);

    void MetisLibraryNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM> &rMeshReader,
                                     std::set<unsigned>& rNodesOwned, std::vector<unsigned>& rProcessorsOffset,
                                     std::vector<unsigned>& rNodePermutation);

    void ReorderNodes(std::vector<unsigned>& rNodePermutation);
};
00139 
/**
 * Constructor.
 *
 * @param metisPartitioning  partitioning strategy to use when the mesh is
 *        constructed from a reader (defaults to METIS_LIBRARY; a DUMB
 *        partition is used regardless when running on a single process).
 */
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ParallelTetrahedralMesh(PartitionType metisPartitioning)
    : mMetisPartitioning(metisPartitioning)
{
}
00145 
00146 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00147 ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::~ParallelTetrahedralMesh()
00148 {
00149     for (unsigned i=0; i<this->mHaloNodes.size(); i++)
00150     {
00151         delete this->mHaloNodes[i];
00152     }
00153 }
00154 
00155 
00156 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00157 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ComputeMeshPartitioning(
00158     AbstractMeshReader<ELEMENT_DIM, SPACE_DIM> &rMeshReader,
00159     std::set<unsigned>& rNodesOwned,
00160     std::set<unsigned>& rHaloNodesOwned,
00161     std::set<unsigned>& rElementsOwned,
00162     std::vector<unsigned>& rProcessorsOffset,
00163     std::vector<unsigned>& rNodePermutation)
00164 {
00166     
00167     if (mMetisPartitioning==METIS_BINARY && PetscTools::NumProcs() > 1)
00168     {
00169         MetisBinaryNodePartitioning(rMeshReader, rNodesOwned, rProcessorsOffset, rNodePermutation);                 
00170     }
00171     else if (mMetisPartitioning==METIS_LIBRARY && PetscTools::NumProcs() > 1)
00172     {
00173         MetisLibraryNodePartitioning(rMeshReader, rNodesOwned, rProcessorsOffset, rNodePermutation);
00174     }
00175     else    
00176     {
00177         DumbNodePartitioning(rMeshReader, rNodesOwned);
00178     }
00179         
00180     for(unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
00181     {
00182         ElementData element_data = rMeshReader.GetNextElementData();
00183 
00184         bool element_owned = false;
00185         std::set<unsigned> temp_halo_nodes;
00186         
00187         for(unsigned i=0; i<ELEMENT_DIM+1; i++)
00188         {
00189             if (rNodesOwned.find(element_data.NodeIndices[i]) != rNodesOwned.end())
00190             {
00191                 element_owned = true;
00192                 rElementsOwned.insert(element_number);
00193             }
00194             else
00195             {
00196                 temp_halo_nodes.insert(element_data.NodeIndices[i]);
00197             }
00198         }
00199         
00200         if (element_owned)
00201         {
00202             rHaloNodesOwned.insert(temp_halo_nodes.begin(), temp_halo_nodes.end());
00203         }                        
00204     }
00205     
00206     rMeshReader.Reset();
00207 }
00208 
00209 
/**
 * Build the distributed mesh from a mesh reader: partition the nodes, then
 * load on each process only the locally-owned nodes, halo nodes, elements
 * and boundary (face) elements. If a METIS partitioning was used, nodes are
 * finally renumbered so each process owns a contiguous index range.
 *
 * @param rMeshReader       reader over the complete mesh files
 * @param cullInternalFaces must be false: face culling is not supported in
 *                          parallel yet (ticket #922)
 */
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructFromMeshReader(
    AbstractMeshReader<ELEMENT_DIM, SPACE_DIM> &rMeshReader,
    bool cullInternalFaces)
{

    if(ELEMENT_DIM==1)
    {
        cullInternalFaces = true;
    }

    // ticket #922: Face culling is not supported in parallel yet
    // NOTE(review): combined with the branch above, this assert necessarily
    // fires for ELEMENT_DIM==1, i.e. 1D meshes are effectively unsupported.
    assert(!cullInternalFaces);

    // Cache global mesh sizes; local containers only hold owned entities
    this->mMeshFileBaseName = rMeshReader.GetMeshFileBaseName();
    mTotalNumElements = rMeshReader.GetNumElements();
    mTotalNumNodes = rMeshReader.GetNumNodes();
    mTotalNumBoundaryElements = rMeshReader.GetNumFaces();

    std::set<unsigned> nodes_owned;
    std::set<unsigned> halo_nodes_owned;
    std::set<unsigned> elements_owned;
    std::vector<unsigned> proc_offsets;//(PetscTools::NumProcs());

    // proc_offsets and mNodesPermutation are only filled by METIS partitioning
    ComputeMeshPartitioning(rMeshReader, nodes_owned, halo_nodes_owned, elements_owned, proc_offsets, this->mNodesPermutation);

    // Reserve memory
    this->mElements.reserve(elements_owned.size());
    this->mNodes.reserve(nodes_owned.size());

    // Load the nodes owned by the processor.
    // Every process reads every node record, but only keeps owned/halo ones.
    std::vector<double> coords;
    for (unsigned node_index=0; node_index < mTotalNumNodes; node_index++)
    {
        coords = rMeshReader.GetNextNode();

        // The node is owned by the processor
        if (nodes_owned.find(node_index) != nodes_owned.end())
        {
            RegisterNode(node_index);
            this->mNodes.push_back(new Node<SPACE_DIM>(node_index, coords, false));
            continue;
        }

        // The node is a halo node in this processor
        if (halo_nodes_owned.find(node_index) != halo_nodes_owned.end())
        {
            RegisterHaloNode(node_index);
            mHaloNodes.push_back(new Node<SPACE_DIM>(node_index, coords, false));
        }

    }

    // Load the elements owned by the processor
    for (unsigned element_index=0; element_index < mTotalNumElements; element_index++)
    {
        ElementData element_data = rMeshReader.GetNextElementData();

        // The element is owned by the processor
        if (elements_owned.find(element_index) != elements_owned.end())
        {
            std::vector<Node<SPACE_DIM>*> nodes;
            unsigned node_local_index;
            // Each corner of an owned element is either an owned node or a
            // halo node; fetch the right Node pointer for each case.
            for (unsigned j=0; j<ELEMENT_DIM+1; j++)
            {
                if (nodes_owned.find(element_data.NodeIndices[j]) != nodes_owned.end())
                {
                    node_local_index = SolveNodeMapping(element_data.NodeIndices[j]);
                    nodes.push_back(this->mNodes[node_local_index]);
                }
                else
                {
                    node_local_index = SolveHaloNodeMapping(element_data.NodeIndices[j]);
                    nodes.push_back(this->mHaloNodes[node_local_index]);
                }
            }

            RegisterElement(element_index);

            Element<ELEMENT_DIM,SPACE_DIM>* p_element = new Element<ELEMENT_DIM,SPACE_DIM>(element_index, nodes);
            this->mElements.push_back(p_element);

            // Carry over the (single) element attribute, if the file has one
            if (rMeshReader.GetNumElementAttributes() > 0)
            {
                assert(rMeshReader.GetNumElementAttributes() == 1);
                unsigned attribute_value = element_data.AttributeValue;
                p_element->SetRegion(attribute_value);
            }
        }
    }

    // Boundary nodes and elements
    unsigned actual_face_index = 0;
    for (unsigned face_index=0; face_index<mTotalNumBoundaryElements; face_index++)
    {
        ElementData face_data = rMeshReader.GetNextFaceData();
        std::vector<unsigned> node_indices = face_data.NodeIndices;

        // A face is processed here iff at least one of its nodes is owned
        bool own = false;

        for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
        {
            // if I own this node
            if(mNodesMapping.find(node_indices[node_index]) != mNodesMapping.end())
            {
                own = true;
            }
        }

        if (!own )
        {
            // ticket #922: If we are culling internal faces we need to check if this is an external one before incrementing the index.
            //              This turned to be tricky in parallel... since you can't check it in faces you don't own.
            assert(!cullInternalFaces);
            actual_face_index++;
            continue;
        }

        bool is_boundary_face = true;

        // Determine if this is a boundary face
        // NOTE(review): is_boundary_face is never set to false and
        // containing_element_indices is never populated, so every face
        // touching an owned node is treated as a boundary face. That is
        // consistent with the assert(!cullInternalFaces) above, but the
        // dead variable suggests unfinished culling support (ticket #922).
        std::set<unsigned> containing_element_indices; // Elements that contain this face
        std::vector<Node<SPACE_DIM>*> nodes;

        for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
        {
            // if I own this node
            if(mNodesMapping.find(node_indices[node_index]) != mNodesMapping.end())
            {
                // Add Node pointer to list for creating an element
                unsigned node_local_index = SolveNodeMapping(node_indices[node_index]);
                nodes.push_back(this->mNodes[node_local_index]);
            }

            // if I halo-own this node
            if(mHaloNodesMapping.find(node_indices[node_index]) != mHaloNodesMapping.end())
            {
                // Add Node pointer to list for creating an element
                unsigned node_local_index = SolveHaloNodeMapping(node_indices[node_index]);
                nodes.push_back(this->mHaloNodes[node_local_index]);
            }

        }

        if (is_boundary_face)
        {
            // This is a boundary face
            // Ensure all its nodes are marked as boundary nodes
            for (unsigned j=0; j<nodes.size(); j++)
            {
                if (!nodes[j]->IsBoundaryNode())
                {
                    nodes[j]->SetAsBoundaryNode();
                    this->mBoundaryNodes.push_back(nodes[j]);
                }
                //Register the index that this bounday element will have
                //with the node
                nodes[j]->AddBoundaryElement(actual_face_index);
            }

            RegisterBoundaryElement(actual_face_index);
            BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>* p_boundary_element = new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(actual_face_index, nodes);
            this->mBoundaryElements.push_back(p_boundary_element);

            // Carry over the (single) face attribute, if the file has one
            if (rMeshReader.GetNumFaceAttributes() > 0)
            {
                assert(rMeshReader.GetNumFaceAttributes() == 1);
                unsigned attribute_value = face_data.AttributeValue;
                p_boundary_element->SetRegion(attribute_value);
            }
            actual_face_index++;
        }
    }

    if (mMetisPartitioning != DUMB && PetscTools::NumProcs()>1)
    {
        // Renumber local and halo nodes so each process owns a contiguous range
        assert(this->mNodesPermutation.size() != 0);
        ReorderNodes(this->mNodesPermutation);

        // Compute nodes per processor based on offset information
        for (unsigned num_proc=0; num_proc<PetscTools::NumProcs()-1; num_proc++)
        {
            this->mNodesPerProcessor.push_back( proc_offsets[num_proc+1]-proc_offsets[num_proc] );
        }

        // Entry for the last processor
        this->mNodesPerProcessor.push_back( mTotalNumNodes - proc_offsets[PetscTools::NumProcs()-1] );
    }
}
00400 
00401 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00402 unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalNodes() const
00403 {
00404     return this->mNodes.size();
00405 }
00406 
00407 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00408 unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalElements() const
00409 {
00410     return this->mElements.size();
00411 }
00412 
/** @return the total number of nodes in the whole mesh (across all processes). */
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumNodes() const
{
    return mTotalNumNodes;
}
00418 
/** @return the total number of elements in the whole mesh (across all processes). */
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumElements() const
{
    return mTotalNumElements;
}
00424 
/** @return the total number of boundary elements (faces) in the whole mesh. */
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumBoundaryElements() const
{
    return mTotalNumBoundaryElements;
}
00430 
00431 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00432 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SetElementOwnerships(unsigned lo, unsigned hi)
00433 {
00434     // This method exists just to keep compatibility with TetrahedralMesh.
00435     // In parallel, you only create the data structures for the elements you have been assigned.
00436     // Therefore, all the local elements are owned by the processor (obviously...)
00437     // We don't care about "hi" and "lo"    
00438     assert(hi>=lo);
00439     for (unsigned element_index=0; element_index<this->mElements.size(); element_index++)
00440     {
00441         Element<ELEMENT_DIM, SPACE_DIM>* p_element=this->mElements[element_index];
00442         p_element->SetOwnership(true);
00443     }
00444 }
00445 
00446 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00447 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterNode(unsigned index)
00448 {
00449     mNodesMapping[index] = this->mNodes.size();
00450 }    
00451 
00452 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00453 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterHaloNode(unsigned index)
00454 {
00455     mHaloNodesMapping[index] = mHaloNodes.size();    
00456 }    
00457 
00458 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00459 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterElement(unsigned index)
00460 {
00461     mElementsMapping[index] = this->mElements.size();    
00462 }    
00463 
00464 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00465 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterBoundaryElement(unsigned index)
00466 {
00467     mBoundaryElementsMapping[index] = this->mBoundaryElements.size();    
00468 }    
00469 
00470 
00471 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00472 unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveNodeMapping(unsigned index) const
00473 {
00474     std::map<unsigned, unsigned>::const_iterator node_position = mNodesMapping.find(index); 
00475         
00476     if(node_position == mNodesMapping.end())
00477     {
00478         std::stringstream message;
00479         message << "Requested node " << index << " does not belong to processor " << PetscTools::GetMyRank();
00480         EXCEPTION(message.str().c_str());        
00481     }
00482     return node_position->second;    
00483 }
00484 
00485 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00486 unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveHaloNodeMapping(unsigned index)
00487 {
00488     assert(mHaloNodesMapping.find(index) != mHaloNodesMapping.end());
00489     return mHaloNodesMapping[index];    
00490 }
00491 
00492 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00493 unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveElementMapping(unsigned index) const
00494 {
00495     std::map<unsigned, unsigned>::const_iterator element_position = mElementsMapping.find(index); 
00496     
00497     if(element_position == mElementsMapping.end())
00498     {
00499         std::stringstream message;
00500         message << "Requested element " << index << " does not belong to processor " << PetscTools::GetMyRank();
00501         EXCEPTION(message.str().c_str());        
00502     }
00503 
00504     return element_position->second;    
00505 }            
00506 
00507 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00508 unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveBoundaryElementMapping(unsigned index) const
00509 {
00510     std::map<unsigned, unsigned>::const_iterator boundary_element_position = mBoundaryElementsMapping.find(index); 
00511     
00512     if(boundary_element_position == mBoundaryElementsMapping.end())
00513     {
00514         std::stringstream message;
00515         message << "Requested boundary element " << index << " does not belong to processor " << PetscTools::GetMyRank();
00516         EXCEPTION(message.str().c_str());        
00517     }
00518 
00519     return boundary_element_position->second;    
00520 }            
00521 
00522 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00523 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::DumbNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM> &rMeshReader,
00524                                                                            std::set<unsigned>& rNodesOwned) 
00525 {
00526     DistributedVector::SetProblemSize(mTotalNumNodes);
00527     for(DistributedVector::Iterator node_number = DistributedVector::Begin(); node_number != DistributedVector::End(); ++node_number)
00528     {
00529          rNodesOwned.insert(node_number.Global);
00530     }
00531     
00532 }
00533 
/**
 * Partition the nodes by writing the mesh in METIS format and running the
 * external METIS "partdmesh" binary, then reading back its npart output to
 * determine node ownership, per-process offsets and a node permutation that
 * renumbers nodes contiguously per process.
 *
 * Only the master process writes the input file and runs METIS; all
 * processes then read the resulting partition file.
 *
 * @param rMeshReader       reader over the complete mesh (reset after use)
 * @param rNodesOwned       filled with the global node indices owned locally
 * @param rProcessorsOffset filled with the first (renumbered) node index of
 *                          each process; must be empty on entry
 * @param rNodePermutation  filled with old-to-new global index mapping
 */
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::MetisBinaryNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM> &rMeshReader,
                                                                                  std::set<unsigned>& rNodesOwned,
                                                                                  std::vector<unsigned>& rProcessorsOffset,
                                                                                  std::vector<unsigned>& rNodePermutation)
{
    assert(PetscTools::NumProcs() > 1);

    assert( ELEMENT_DIM==2 || ELEMENT_DIM==3 ); // Metis works with triangles and tetras

    unsigned num_procs = PetscTools::NumProcs();

    // Open a file for the elements
    OutputFileHandler handler("");

    // Filenames
    std::string basename = "metis.mesh";
    std::stringstream output_file;
    output_file << basename << ".npart." << num_procs;
    std::string nodes_per_proc_file = basename + ".nodesperproc";

    // Only the master process should do IO and call METIS
    if (handler.IsMaster())
    {
        /*
         *  Create input file for METIS
         */
        out_stream metis_file=handler.OpenOutputFile(basename);

        // File header: number of elements and the METIS element-type code
        (*metis_file)<<this->GetNumElements()<<"\t";
        if (ELEMENT_DIM==2)
        {
            (*metis_file)<<1<<"\n"; //1 is Metis speak for triangles
        }
        else
        {
            (*metis_file)<<2<<"\n"; //2 is Metis speak for tetrahedra
        }


        // Graph representation of the mesh: one line per element, listing its
        // node indices (+1 because METIS expects 1-based numbering here)
        for(unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
        {
            ElementData element_data = rMeshReader.GetNextElementData();

            for(unsigned i=0; i<ELEMENT_DIM+1; i++)
            {
                    (*metis_file)<<element_data.NodeIndices[i] + 1<<"\t";
            }
            (*metis_file)<<"\n";
        }
        metis_file->close();

        rMeshReader.Reset();

        /*
         *  Call METIS binary to perform the partitioning.
         *  It will output a file called metis.mesh.npart.numProcs
         */
        std::stringstream permute_command;
        permute_command <<  "./bin/partdmesh "
                        <<  handler.GetOutputDirectoryFullPath("")
                        <<  basename << " "
                        <<  num_procs
                        <<  " > /dev/null";

        // METIS doesn't return 0 after a successful execution
        IGNORE_RET(system, permute_command.str());
    }

     /*
     * Wait for the permutation to be available
     */
    PetscTools::Barrier();

    /*
     *  Read partition file and compute local node ownership and processors offset
     */
    std::ifstream partition_stream;
    std::string full_path = handler.GetOutputDirectoryFullPath("")
                            + output_file.str();

    partition_stream.open(full_path.c_str());
    assert(partition_stream.is_open());

    assert(rProcessorsOffset.size() == 0); // Making sure the vector is empty. After calling resize() only newly created memory will be initialised to 0.
    rProcessorsOffset.resize(PetscTools::NumProcs(), 0);

    // First pass: the npart file lists, for each node, the rank it belongs to
    for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
    {
        unsigned part_read;

        partition_stream >> part_read;

        // METIS output says I own this node
        if (part_read == PetscTools::GetMyRank())
        {
            rNodesOwned.insert(node_index);
        }

        // Offset is defined as the first node owned by a processor. We compute it incrementally.
        // i.e. if node_index belongs to proc 3 (of 6) we have to shift the processors 4, 5, and 6
        // offset a position
        for (unsigned proc=part_read+1; proc<PetscTools::NumProcs(); proc++)
        {
            rProcessorsOffset[proc]++;
        }
    }

    /*
     *  Once we know the offsets we can compute the permutation vector
     */
    // Move stream pointer to the beginning of the file for a second pass
    partition_stream.seekg (0, std::ios::beg);

    // local_index[p] counts how many of p's nodes we have renumbered so far
    std::vector<unsigned> local_index(PetscTools::NumProcs(), 0);

    rNodePermutation.resize(this->GetNumNodes());

    for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
    {
        unsigned part_read;

        partition_stream >> part_read;

        // New global index = owning process's offset + position within that process
        rNodePermutation[node_index] = rProcessorsOffset[part_read] + local_index[part_read];

        local_index[part_read]++;
    }

    partition_stream.close();

}
00668 
00669 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00670 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::MetisLibraryNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM> &rMeshReader,
00671                                                                                   std::set<unsigned>& rNodesOwned, 
00672                                                                                   std::vector<unsigned>& rProcessorsOffset,
00673                                                                                   std::vector<unsigned>& rNodePermutation)
00674 {
00675     assert(PetscTools::NumProcs() > 1);
00676     
00677     assert( ELEMENT_DIM==2 || ELEMENT_DIM==3 ); // Metis works with triangles and tetras
00678 
00679 
00680     int ne = rMeshReader.GetNumElements(); 
00681     int nn = rMeshReader.GetNumNodes(); 
00682     idxtype* elmnts = new idxtype[ne * (ELEMENT_DIM+1)];
00683     assert(elmnts != NULL);   
00684 
00685     unsigned counter=0;    
00686     for(unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
00687     {
00688         ElementData element_data = rMeshReader.GetNextElementData();
00689 
00690         for(unsigned i=0; i<ELEMENT_DIM+1; i++)
00691         {
00692             elmnts[counter++] = element_data.NodeIndices[i];
00693         }
00694     }
00695     rMeshReader.Reset();    
00696      
00697     int etype;
00698     
00699     switch (ELEMENT_DIM)
00700     {
00701         case 2:          
00702             etype = 1; //1 is Metis speak for triangles
00703             break;
00704         case 3:
00705             etype = 2; //2 is Metis speak for tetrahedra
00706             break;
00707         default:
00708             NEVER_REACHED;
00709     }    
00710             
00711     int numflag = 0; //0 means C-style numbering is assumed 
00712     int nparts = PetscTools::NumProcs();
00713     int edgecut;
00714     idxtype* epart = new idxtype[ne];
00715     assert(epart != NULL); 
00716     idxtype* npart = new idxtype[nn];
00717     assert(epart != NULL);
00718 
00719     METIS_PartMeshNodal(&ne, &nn, elmnts, &etype, &numflag, &nparts, &edgecut, epart, npart);//, wgetflag, vwgt);    
00720 
00721     assert(rProcessorsOffset.size() == 0); // Making sure the vector is empty. After calling resize() only newly created memory will be initialised to 0.
00722     rProcessorsOffset.resize(PetscTools::NumProcs(), 0);
00723 
00724     for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
00725     {
00726         unsigned part_read = npart[node_index];
00727         
00728         // METIS output says I own this node
00729         if (part_read == PetscTools::GetMyRank())
00730         {
00731             rNodesOwned.insert(node_index);
00732         }
00733         
00734         // Offset is defined as the first node owned by a processor. We compute it incrementally.
00735         // i.e. if node_index belongs to proc 3 (of 6) we have to shift the processors 4, 5, and 6 
00736         // offset a position.
00737         for (unsigned proc=part_read+1; proc<PetscTools::NumProcs(); proc++)
00738         {
00739             rProcessorsOffset[proc]++;
00740         }
00741     }
00742 
00743     /*
00744      *  Once we know the offsets we can compute the permutation vector
00745      */    
00746     std::vector<unsigned> local_index(PetscTools::NumProcs(), 0);    
00747     
00748     rNodePermutation.resize(this->GetNumNodes());
00749     
00750     for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
00751     {   
00752         unsigned part_read = npart[node_index];
00753 
00754         rNodePermutation[node_index] = rProcessorsOffset[part_read] + local_index[part_read];
00755         
00756         local_index[part_read]++;
00757     }
00758     //std::cout << rNodePermutation.size() << std::endl;
00759     //std::cout << this->mNodesPermutation.size() << std::endl;
00760     
00761     delete[] elmnts;
00762     delete[] epart;
00763     delete[] npart;
00764 }
00765 
00766 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00767 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ReorderNodes(std::vector<unsigned>& rNodePermutation)
00768 {
00769     assert(PetscTools::NumProcs() > 1);
00770     
00771     // Need to rebuild global-local maps
00772     mNodesMapping.clear();
00773     mHaloNodesMapping.clear();
00774     
00775     //Update indices
00776     for (unsigned index=0; index<this->mNodes.size(); index++)
00777     {
00778         unsigned old_index = this->mNodes[index]->GetIndex();
00779         unsigned new_index = rNodePermutation[old_index];
00780         
00781         this->mNodes[index]->SetIndex(new_index);
00782         mNodesMapping[new_index] = index;
00783     }
00784 
00785     for (unsigned index=0; index<mHaloNodes.size(); index++)
00786     {
00787         unsigned old_index = mHaloNodes[index]->GetIndex();
00788         unsigned new_index = rNodePermutation[old_index];
00789         
00790         mHaloNodes[index]->SetIndex(new_index);
00791         mHaloNodesMapping[new_index] = index;
00792     }
00793 }
00794 
00795 
00796 #endif /*PARALLELTETRAHEDRALMESH_HPP_*/

Generated on Wed Mar 18 12:51:52 2009 for Chaste by  doxygen 1.5.5