ParallelTetrahedralMesh.cpp

00001 /*
00002 
00003 Copyright (C) University of Oxford, 2005-2009
00004 
00005 University of Oxford means the Chancellor, Masters and Scholars of the
00006 University of Oxford, having an administrative office at Wellington
00007 Square, Oxford OX1 2JD, UK.
00008 
00009 This file is part of Chaste.
00010 
00011 Chaste is free software: you can redistribute it and/or modify it
00012 under the terms of the GNU Lesser General Public License as published
00013 by the Free Software Foundation, either version 2.1 of the License, or
00014 (at your option) any later version.
00015 
00016 Chaste is distributed in the hope that it will be useful, but WITHOUT
00017 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
00018 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
00019 License for more details. The offer of Chaste under the terms of the
00020 License is subject to the License being interpreted in accordance with
00021 English Law and subject to any action against the University of Oxford
00022 being under the jurisdiction of the English Courts.
00023 
00024 You should have received a copy of the GNU Lesser General Public License
00025 along with Chaste. If not, see <http://www.gnu.org/licenses/>.
00026 
00027 */
00028 
00029 #include "ParallelTetrahedralMesh.hpp"
00030 
00031 #include <cassert>
00032 #include <sstream>
00033 #include <string>
00034 
00035 #include "Exception.hpp"
00036 #include "Element.hpp"
00037 #include "BoundaryElement.hpp"
00038 
00039 #include "PetscTools.hpp"
00040 #include "DistributedVectorFactory.hpp"
00041 #include "OutputFileHandler.hpp"
00042 
00043 
00045 //   IMPLEMENTATION
00047 
// Constructor: records which partitioning strategy (METIS binary, METIS
// library, or dumb/contiguous) to use when ConstructFromMeshReader() is
// later called.  No mesh data is loaded here.
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ParallelTetrahedralMesh(PartitionType metisPartitioning)
    : mMetisPartitioning(metisPartitioning)
{
}
00053 
00054 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00055 ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::~ParallelTetrahedralMesh()
00056 {
00057     for (unsigned i=0; i<this->mHaloNodes.size(); i++)
00058     {
00059         delete this->mHaloNodes[i];
00060     }
00061 }
00062 
// Decide which nodes, halo nodes and elements this process will own.
// First dispatches to one of three node-partitioning strategies (METIS
// binary, METIS library, or dumb/contiguous), then makes a single pass over
// all elements: an element is owned if ANY of its nodes is locally owned,
// and the non-owned nodes of an owned element become halo nodes.
// On exit the reader has been Reset() ready for the actual construction pass.
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ComputeMeshPartitioning(
    AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
    std::set<unsigned>& rNodesOwned,        // out: global indices of locally owned nodes
    std::set<unsigned>& rHaloNodesOwned,    // out: global indices of halo nodes
    std::set<unsigned>& rElementsOwned,     // out: global indices of locally owned elements
    std::vector<unsigned>& rProcessorsOffset, // out: first node index owned by each proc (METIS paths only)
    std::vector<unsigned>& rNodePermutation)  // out: old->new node index map (METIS paths only)
{

    // METIS-based partitioning only makes sense with more than one process;
    // otherwise fall through to the trivial contiguous partition.
    if (mMetisPartitioning==METIS_BINARY && PetscTools::GetNumProcs() > 1)
    {
        MetisBinaryNodePartitioning(rMeshReader, rNodesOwned, rProcessorsOffset, rNodePermutation);
    }
    else if (mMetisPartitioning==METIS_LIBRARY && PetscTools::GetNumProcs() > 1)
    {
        MetisLibraryNodePartitioning(rMeshReader, rNodesOwned, rProcessorsOffset, rNodePermutation);
    }
    else
    {
        DumbNodePartitioning(rMeshReader, rNodesOwned);
    }

    for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
    {
        ElementData element_data = rMeshReader.GetNextElementData();

        bool element_owned = false;
        // Nodes of this element that we do NOT own; only promoted to halo
        // nodes if the element itself turns out to be owned.
        std::set<unsigned> temp_halo_nodes;

        for (unsigned i=0; i<ELEMENT_DIM+1; i++)
        {
            if (rNodesOwned.find(element_data.NodeIndices[i]) != rNodesOwned.end())
            {
                element_owned = true;
                rElementsOwned.insert(element_number);
            }
            else
            {
                temp_halo_nodes.insert(element_data.NodeIndices[i]);
            }
        }

        if (element_owned)
        {
            rHaloNodesOwned.insert(temp_halo_nodes.begin(), temp_halo_nodes.end());
        }
    }

    // Leave the reader positioned at the start for the caller's second pass.
    rMeshReader.Reset();
}
00115 
// Build the distributed mesh from a reader: partition the nodes, then load
// only the locally owned nodes, halo nodes, elements and boundary faces on
// this process.  Finally (for METIS partitions) renumber the nodes so each
// process owns a contiguous global index range and create the matching
// DistributedVectorFactory.
// Note: every process reads the whole file but only keeps its own share.
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructFromMeshReader(
    AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
    bool cullInternalFaces)
{
    // ticket #922: Commented as it will break coverage until 
    // face culling is supported in parallel and dontTestConstructFromMeshReader1D 
    // becomes a test
//    if (ELEMENT_DIM==1)
//    {
//        cullInternalFaces = true;
//    }

    // ticket #922: Face culling is not supported in parallel yet
    assert(!cullInternalFaces);

    this->mMeshFileBaseName = rMeshReader.GetMeshFileBaseName();
    mTotalNumElements = rMeshReader.GetNumElements();
    mTotalNumNodes = rMeshReader.GetNumNodes();
    mTotalNumBoundaryElements = rMeshReader.GetNumFaces();

    std::set<unsigned> nodes_owned;
    std::set<unsigned> halo_nodes_owned;
    std::set<unsigned> elements_owned;
    std::vector<unsigned> proc_offsets;//(PetscTools::GetNumProcs());

    ComputeMeshPartitioning(rMeshReader, nodes_owned, halo_nodes_owned, elements_owned, proc_offsets, this->mNodesPermutation);

    // Reserve memory
    this->mElements.reserve(elements_owned.size());
    this->mNodes.reserve(nodes_owned.size());

    // Load the nodes owned by the processor.  Every node record must be read
    // (to advance the reader) even if it is discarded.
    std::vector<double> coords;
    for (unsigned node_index=0; node_index < mTotalNumNodes; node_index++)
    {
        coords = rMeshReader.GetNextNode();

        // The node is owned by the processor
        if (nodes_owned.find(node_index) != nodes_owned.end())
        {
            // Register before push_back: the mapping records the index the
            // node will occupy, i.e. the current container size.
            RegisterNode(node_index);
            this->mNodes.push_back(new Node<SPACE_DIM>(node_index, coords, false));
            continue;
        }

        // The node is a halo node in this processor
        if (halo_nodes_owned.find(node_index) != halo_nodes_owned.end())
        {
            RegisterHaloNode(node_index);
            mHaloNodes.push_back(new Node<SPACE_DIM>(node_index, coords, false));
        }
    }

    // Load the elements owned by the processor
    for (unsigned element_index=0; element_index < mTotalNumElements; element_index++)
    {
        ElementData element_data = rMeshReader.GetNextElementData();

        // The element is owned by the processor
        if (elements_owned.find(element_index) != elements_owned.end())
        {
            std::vector<Node<SPACE_DIM>*> nodes;
            unsigned node_local_index;
            for (unsigned j=0; j<ELEMENT_DIM+1; j++)
            {
                // Each vertex is either an owned node or (by construction in
                // ComputeMeshPartitioning) a halo node of this process.
                if (nodes_owned.find(element_data.NodeIndices[j]) != nodes_owned.end())
                {
                    node_local_index = SolveNodeMapping(element_data.NodeIndices[j]);
                    nodes.push_back(this->mNodes[node_local_index]);
                }
                else
                {
                    node_local_index = SolveHaloNodeMapping(element_data.NodeIndices[j]);
                    nodes.push_back(this->mHaloNodes[node_local_index]);
                }
            }

            RegisterElement(element_index);

            Element<ELEMENT_DIM,SPACE_DIM> *p_element = new Element<ELEMENT_DIM,SPACE_DIM>(element_index, nodes);
            this->mElements.push_back(p_element);

            if (rMeshReader.GetNumElementAttributes() > 0)
            {
                assert(rMeshReader.GetNumElementAttributes() == 1);
                unsigned attribute_value = element_data.AttributeValue;
                p_element->SetRegion(attribute_value);
            }
        }
    }

    // Boundary nodes and elements.  actual_face_index is a GLOBAL counter:
    // it advances for faces other processes own too, so boundary element
    // indices agree across processes.
    unsigned actual_face_index = 0;
    for (unsigned face_index=0; face_index<mTotalNumBoundaryElements; face_index++)
    {
        ElementData face_data = rMeshReader.GetNextFaceData();
        std::vector<unsigned> node_indices = face_data.NodeIndices;

        bool own = false;

        for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
        {
            // if I own this node
            if (mNodesMapping.find(node_indices[node_index]) != mNodesMapping.end())
            {
                own = true;
            }
        }

        if (!own)
        {
            // ticket #922: If we are culling internal faces we need to check if this is an external one before incrementing the index.
            //              This turned to be tricky in parallel... since you can't check it in faces you don't own.
            assert(!cullInternalFaces);
            actual_face_index++;
            continue;
        }

        // NOTE(review): is_boundary_face is never set to false and
        // containing_element_indices is never populated below — remnants of
        // the (disabled) face-culling logic; every owned face is currently
        // treated as a boundary face.  Confirm against ticket #922.
        bool is_boundary_face = true;

        // Determine if this is a boundary face
        std::set<unsigned> containing_element_indices; // Elements that contain this face
        std::vector<Node<SPACE_DIM>*> nodes;

        for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
        {
            // if I own this node
            if (mNodesMapping.find(node_indices[node_index]) != mNodesMapping.end())
            {
                // Add Node pointer to list for creating an element
                unsigned node_local_index = SolveNodeMapping(node_indices[node_index]);
                nodes.push_back(this->mNodes[node_local_index]);
            }

            // if I halo-own this node
            if (mHaloNodesMapping.find(node_indices[node_index]) != mHaloNodesMapping.end())
            {
                // Add Node pointer to list for creating an element
                unsigned node_local_index = SolveHaloNodeMapping(node_indices[node_index]);
                nodes.push_back(this->mHaloNodes[node_local_index]);
            }
        }

        if (is_boundary_face)
        {
            // This is a boundary face
            // Ensure all its nodes are marked as boundary nodes
            for (unsigned j=0; j<nodes.size(); j++)
            {
                if (!nodes[j]->IsBoundaryNode())
                {
                    nodes[j]->SetAsBoundaryNode();
                    this->mBoundaryNodes.push_back(nodes[j]);
                }
                // Register the index that this bounday element will have with the node
                nodes[j]->AddBoundaryElement(actual_face_index);
            }

            RegisterBoundaryElement(actual_face_index);
            BoundaryElement<ELEMENT_DIM-1,SPACE_DIM> *p_boundary_element = new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(actual_face_index, nodes);
            this->mBoundaryElements.push_back(p_boundary_element);

            if (rMeshReader.GetNumFaceAttributes() > 0)
            {
                assert(rMeshReader.GetNumFaceAttributes() == 1);
                unsigned attribute_value = face_data.AttributeValue;
                p_boundary_element->SetRegion(attribute_value);
            }
            actual_face_index++;
        }
    }

    if (mMetisPartitioning != DUMB && PetscTools::GetNumProcs()>1)
    {
        assert(this->mNodesPermutation.size() != 0);
        // Renumber nodes so each process owns a contiguous global range
        ReorderNodes(this->mNodesPermutation);

        // Number of locally owned nodes, derived from the per-process offsets
        // computed by the METIS partitioning (last process owns the tail).
        unsigned num_owned;
        unsigned rank = PetscTools::GetMyRank();
        if ( rank<PetscTools::GetNumProcs()-1 )
        {
            num_owned =  proc_offsets[rank+1]-proc_offsets[rank];
        }
        else
        {
            num_owned = mTotalNumNodes - proc_offsets[rank];
        }
        
        this->mpDistributedVectorFactory = new DistributedVectorFactory(this->GetNumNodes(), num_owned);
    }
    else 
    {
        // Dumb or sequential partition: the factory was already created by
        // DumbNodePartitioning.
        assert(this->mpDistributedVectorFactory);
    }
}
00314 
00315 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00316 unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalNodes() const
00317 {
00318     return this->mNodes.size();
00319 }
00320 
00321 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00322 unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalElements() const
00323 {
00324     return this->mElements.size();
00325 }
00326 
00327 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00328 unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumNodes() const
00329 {
00330     return mTotalNumNodes;
00331 }
00332 
00333 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00334 unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumElements() const
00335 {
00336     return mTotalNumElements;
00337 }
00338 
00339 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00340 unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumBoundaryElements() const
00341 {
00342     return mTotalNumBoundaryElements;
00343 }
00344 
00345 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00346 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SetElementOwnerships(unsigned lo, unsigned hi)
00347 {
00348     // This method exists just to keep compatibility with TetrahedralMesh.
00349     // In parallel, you only create the data structures for the elements you have been assigned.
00350     // Therefore, all the local elements are owned by the processor (obviously...)
00351     // We don't care about "hi" and "lo"
00352     assert(hi>=lo);
00353     for (unsigned element_index=0; element_index<this->mElements.size(); element_index++)
00354     {
00355         Element<ELEMENT_DIM, SPACE_DIM> *p_element=this->mElements[element_index];
00356         p_element->SetOwnership(true);
00357     }
00358 }
00359 
00360 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00361 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterNode(unsigned index)
00362 {
00363     mNodesMapping[index] = this->mNodes.size();
00364 }
00365 
00366 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00367 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterHaloNode(unsigned index)
00368 {
00369     mHaloNodesMapping[index] = mHaloNodes.size();
00370 }
00371 
00372 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00373 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterElement(unsigned index)
00374 {
00375     mElementsMapping[index] = this->mElements.size();
00376 }
00377 
00378 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00379 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterBoundaryElement(unsigned index)
00380 {
00381     mBoundaryElementsMapping[index] = this->mBoundaryElements.size();
00382 }
00383 
00384 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00385 unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveNodeMapping(unsigned index) const
00386 {
00387     std::map<unsigned, unsigned>::const_iterator node_position = mNodesMapping.find(index);
00388 
00389     if (node_position == mNodesMapping.end())
00390     {
00391         std::stringstream message;
00392         message << "Requested node " << index << " does not belong to processor " << PetscTools::GetMyRank();
00393         EXCEPTION(message.str().c_str());
00394     }
00395     return node_position->second;
00396 }
00397 
00398 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00399 unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveHaloNodeMapping(unsigned index)
00400 {
00401     assert(mHaloNodesMapping.find(index) != mHaloNodesMapping.end());
00402     return mHaloNodesMapping[index];
00403 }
00404 
00405 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00406 unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveElementMapping(unsigned index) const
00407 {
00408     std::map<unsigned, unsigned>::const_iterator element_position = mElementsMapping.find(index);
00409 
00410     if (element_position == mElementsMapping.end())
00411     {
00412         std::stringstream message;
00413         message << "Requested element " << index << " does not belong to processor " << PetscTools::GetMyRank();
00414         EXCEPTION(message.str().c_str());
00415     }
00416 
00417     return element_position->second;
00418 }
00419 
00420 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00421 unsigned ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveBoundaryElementMapping(unsigned index) const
00422 {
00423     std::map<unsigned, unsigned>::const_iterator boundary_element_position = mBoundaryElementsMapping.find(index);
00424 
00425     if (boundary_element_position == mBoundaryElementsMapping.end())
00426     {
00427         std::stringstream message;
00428         message << "Requested boundary element " << index << " does not belong to processor " << PetscTools::GetMyRank();
00429         EXCEPTION(message.str().c_str());
00430     }
00431 
00432     return boundary_element_position->second;
00433 }
00434 
00435 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00436 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::DumbNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
00437                                                                            std::set<unsigned>& rNodesOwned)
00438 {
00439     DistributedVectorFactory factory(mTotalNumNodes); 
00440     
00441     this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes);
00442     for (unsigned node_index = this->mpDistributedVectorFactory->GetLow();
00443          node_index < this->mpDistributedVectorFactory->GetHigh();
00444          node_index++)         
00445     {
00446          rNodesOwned.insert(node_index);
00447     }
00448 }
00449 
// Partition the mesh nodes by shelling out to the METIS 'partdmesh' binary.
// The master process writes the element connectivity in METIS mesh format,
// runs the binary, and all processes then read back the resulting
// metis.mesh.npart.<nprocs> file to compute:
//   - rNodesOwned:        the nodes assigned to this process,
//   - rProcessorsOffset:  the first (renumbered) node index of each process,
//   - rNodePermutation:   old->new node index map making ownership contiguous.
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::MetisBinaryNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
                                                                                  std::set<unsigned>& rNodesOwned,
                                                                                  std::vector<unsigned>& rProcessorsOffset,
                                                                                  std::vector<unsigned>& rNodePermutation)
{
    assert(PetscTools::GetNumProcs() > 1);
    #define COVERAGE_IGNORE
    assert( ELEMENT_DIM==2 || ELEMENT_DIM==3 ); // Metis works with triangles and tetras
    #undef COVERAGE_IGNORE

    unsigned num_procs = PetscTools::GetNumProcs();

    // Open a file for the elements
    OutputFileHandler handler("");

    // Filenames
    std::string basename = "metis.mesh";
    std::stringstream output_file;
    output_file << basename << ".npart." << num_procs;
    std::string nodes_per_proc_file = basename + ".nodesperproc";

    // Only the master process should do IO and call METIS
    if (handler.IsMaster())
    {
        /*
         *  Create input file for METIS
         */
        out_stream metis_file=handler.OpenOutputFile(basename);

        // File header
        (*metis_file) << this->GetNumElements() << "\t";
        if (ELEMENT_DIM==2)
        {
            (*metis_file) << 1 << "\n"; //1 is Metis speak for triangles
        }
        else
        {
            (*metis_file) << 2 << "\n"; //2 is Metis speak for tetrahedra
        }

        // Graph representation of the mesh: one line per element listing its
        // (1-based, as METIS expects) node indices.
        for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
        {
            ElementData element_data = rMeshReader.GetNextElementData();

            for (unsigned i=0; i<ELEMENT_DIM+1; i++)
            {
                    (*metis_file) << element_data.NodeIndices[i] + 1 << "\t";
            }
            (*metis_file) << "\n";
        }
        metis_file->close();

        rMeshReader.Reset();

        /*
         * Call METIS binary to perform the partitioning.
         * It will output a file called metis.mesh.npart.numProcs
         */
        std::stringstream permute_command;
        permute_command <<  "./bin/partdmesh "
                        <<  handler.GetOutputDirectoryFullPath("")
                        <<  basename << " "
                        <<  num_procs
                        <<  " > /dev/null";

        // METIS doesn't return 0 after a successful execution
        IGNORE_RET(system, permute_command.str());
    }

    /*
     * Wait for the permutation to be available
     */
    PetscTools::Barrier();

    /*
     *  Read partition file and compute local node ownership and processors offset
     */
    std::ifstream partition_stream;
    std::string full_path = handler.GetOutputDirectoryFullPath("")
                            + output_file.str();

    partition_stream.open(full_path.c_str());
    assert(partition_stream.is_open());

    assert(rProcessorsOffset.size() == 0); // Making sure the vector is empty. After calling resize() only newly created memory will be initialised to 0.
    rProcessorsOffset.resize(PetscTools::GetNumProcs(), 0);

    // First pass over the partition file: ownership and per-process offsets.
    for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
    {
        unsigned part_read;

        partition_stream >> part_read;

        // METIS output says I own this node
        if (part_read == PetscTools::GetMyRank())
        {
            rNodesOwned.insert(node_index);
        }

        // Offset is defined as the first node owned by a processor. We compute it incrementally.
        // i.e. if node_index belongs to proc 3 (of 6) we have to shift the processors 4, 5, and 6
        // offset a position
        for (unsigned proc=part_read+1; proc<PetscTools::GetNumProcs(); proc++)
        {
            rProcessorsOffset[proc]++;
        }
    }

    /*
     *  Once we know the offsets we can compute the permutation vector
     */
    // Move stream pointer to the beginning of the file for a second pass
    partition_stream.seekg (0, std::ios::beg);

    // local_index[p] counts how many of process p's nodes we have seen so far
    std::vector<unsigned> local_index(PetscTools::GetNumProcs(), 0);

    rNodePermutation.resize(this->GetNumNodes());

    for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
    {
        unsigned part_read;

        partition_stream >> part_read;

        rNodePermutation[node_index] = rProcessorsOffset[part_read] + local_index[part_read];

        local_index[part_read]++;
    }

    partition_stream.close();

}
00584 
00585 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00586 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::MetisLibraryNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
00587                                                                                   std::set<unsigned>& rNodesOwned,
00588                                                                                   std::vector<unsigned>& rProcessorsOffset,
00589                                                                                   std::vector<unsigned>& rNodePermutation)
00590 {
00591     assert(PetscTools::GetNumProcs() > 1);
00592 
00593     assert(ELEMENT_DIM==2 || ELEMENT_DIM==3); // Metis works with triangles and tetras
00594 
00595     int ne = rMeshReader.GetNumElements();
00596     int nn = rMeshReader.GetNumNodes();
00597     idxtype* elmnts = new idxtype[ne * (ELEMENT_DIM+1)];
00598     assert(elmnts != NULL);
00599 
00600     unsigned counter=0;
00601     for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
00602     {
00603         ElementData element_data = rMeshReader.GetNextElementData();
00604 
00605         for (unsigned i=0; i<ELEMENT_DIM+1; i++)
00606         {
00607             elmnts[counter++] = element_data.NodeIndices[i];
00608         }
00609     }
00610     rMeshReader.Reset();
00611 
00612     int etype;
00613 
00614     switch (ELEMENT_DIM)
00615     {
00616         case 2:
00617             etype = 1; //1 is Metis speak for triangles
00618             break;
00619         case 3:
00620             etype = 2; //2 is Metis speak for tetrahedra
00621             break;
00622         default:
00623             NEVER_REACHED;
00624     }
00625 
00626     int numflag = 0; //0 means C-style numbering is assumed
00627     int nparts = PetscTools::GetNumProcs();
00628     int edgecut;
00629     idxtype* epart = new idxtype[ne];
00630     assert(epart != NULL);
00631     idxtype* npart = new idxtype[nn];
00632     assert(epart != NULL);
00633 
00634     METIS_PartMeshNodal(&ne, &nn, elmnts, &etype, &numflag, &nparts, &edgecut, epart, npart);//, wgetflag, vwgt);
00635 
00636     assert(rProcessorsOffset.size() == 0); // Making sure the vector is empty. After calling resize() only newly created memory will be initialised to 0.
00637     rProcessorsOffset.resize(PetscTools::GetNumProcs(), 0);
00638 
00639     for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
00640     {
00641         unsigned part_read = npart[node_index];
00642 
00643         // METIS output says I own this node
00644         if (part_read == PetscTools::GetMyRank())
00645         {
00646             rNodesOwned.insert(node_index);
00647         }
00648 
00649         // Offset is defined as the first node owned by a processor. We compute it incrementally.
00650         // i.e. if node_index belongs to proc 3 (of 6) we have to shift the processors 4, 5, and 6
00651         // offset a position.
00652         for (unsigned proc=part_read+1; proc<PetscTools::GetNumProcs(); proc++)
00653         {
00654             rProcessorsOffset[proc]++;
00655         }
00656     }
00657 
00658     /*
00659      *  Once we know the offsets we can compute the permutation vector
00660      */
00661     std::vector<unsigned> local_index(PetscTools::GetNumProcs(), 0);
00662 
00663     rNodePermutation.resize(this->GetNumNodes());
00664 
00665     for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
00666     {
00667         unsigned part_read = npart[node_index];
00668 
00669         rNodePermutation[node_index] = rProcessorsOffset[part_read] + local_index[part_read];
00670 
00671         local_index[part_read]++;
00672     }
00673     //std::cout << rNodePermutation.size() << std::endl;
00674     //std::cout << this->mNodesPermutation.size() << std::endl;
00675 
00676     delete[] elmnts;
00677     delete[] epart;
00678     delete[] npart;
00679 }
00680 
00681 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00682 void ParallelTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ReorderNodes(std::vector<unsigned>& rNodePermutation)
00683 {
00684     assert(PetscTools::GetNumProcs() > 1);
00685 
00686     // Need to rebuild global-local maps
00687     mNodesMapping.clear();
00688     mHaloNodesMapping.clear();
00689 
00690     // Update indices
00691     for (unsigned index=0; index<this->mNodes.size(); index++)
00692     {
00693         unsigned old_index = this->mNodes[index]->GetIndex();
00694         unsigned new_index = rNodePermutation[old_index];
00695 
00696         this->mNodes[index]->SetIndex(new_index);
00697         mNodesMapping[new_index] = index;
00698     }
00699 
00700     for (unsigned index=0; index<mHaloNodes.size(); index++)
00701     {
00702         unsigned old_index = mHaloNodes[index]->GetIndex();
00703         unsigned new_index = rNodePermutation[old_index];
00704 
00705         mHaloNodes[index]->SetIndex(new_index);
00706         mHaloNodesMapping[new_index] = index;
00707     }
00708 }
00709 
00711 // Explicit instantiation
00713 
00714 template class ParallelTetrahedralMesh<1,1>;
00715 template class ParallelTetrahedralMesh<1,2>;
00716 template class ParallelTetrahedralMesh<1,3>;
00717 template class ParallelTetrahedralMesh<2,2>;
00718 template class ParallelTetrahedralMesh<2,3>;
00719 template class ParallelTetrahedralMesh<3,3>;

Generated on Tue Aug 4 16:10:22 2009 for Chaste by  doxygen 1.5.5