DistributedTetrahedralMesh.cpp

00001 /*
00002 
00003 Copyright (C) University of Oxford, 2005-2010
00004 
00005 University of Oxford means the Chancellor, Masters and Scholars of the
00006 University of Oxford, having an administrative office at Wellington
00007 Square, Oxford OX1 2JD, UK.
00008 
00009 This file is part of Chaste.
00010 
00011 Chaste is free software: you can redistribute it and/or modify it
00012 under the terms of the GNU Lesser General Public License as published
00013 by the Free Software Foundation, either version 2.1 of the License, or
00014 (at your option) any later version.
00015 
00016 Chaste is distributed in the hope that it will be useful, but WITHOUT
00017 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
00018 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
00019 License for more details. The offer of Chaste under the terms of the
00020 License is subject to the License being interpreted in accordance with
00021 English Law and subject to any action against the University of Oxford
00022 being under the jurisdiction of the English Courts.
00023 
00024 You should have received a copy of the GNU Lesser General Public License
00025 along with Chaste. If not, see <http://www.gnu.org/licenses/>.
00026 
00027 */
00028 
00029 #include "DistributedTetrahedralMesh.hpp"
00030 
00031 #include <cassert>
00032 #include <sstream>
00033 #include <string>
00034 
00035 #include "Exception.hpp"
00036 #include "Element.hpp"
00037 #include "BoundaryElement.hpp"
00038 
00039 #include "PetscTools.hpp"
00040 #include "DistributedVectorFactory.hpp"
00041 #include "OutputFileHandler.hpp"
00042 
00043 #include "RandomNumberGenerator.hpp"
00044 
00045 #include "Timer.hpp"
00046 
00047 #include "petscao.h"
00048 
00050 //   IMPLEMENTATION
00052 
00053 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00054 DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::DistributedTetrahedralMesh(PartitionType metisPartitioning)
00055     :
00056       mTotalNumElements(0u),
00057       mTotalNumBoundaryElements(0u),
00058       mTotalNumNodes(0u),
00059       mMetisPartitioning(metisPartitioning)
00060 {
00061     if (ELEMENT_DIM == 1)
00062     {
00063         //No METIS partition is possible - revert to DUMB
00064         mMetisPartitioning=DUMB;
00065     }
00066 }
00067 
00068 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00069 DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::~DistributedTetrahedralMesh()
00070 {
00071     for (unsigned i=0; i<this->mHaloNodes.size(); i++)
00072     {
00073         delete this->mHaloNodes[i];
00074     }
00075 }
00076 
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SetDistributedVectorFactory(DistributedVectorFactory* pFactory)
{
    // Store the factory via the base class, then force DUMB partitioning:
    // an externally imposed node distribution must not be overridden by METIS/ParMETIS.
    AbstractMesh<ELEMENT_DIM,SPACE_DIM>::SetDistributedVectorFactory(pFactory);
    mMetisPartitioning = DUMB;
}
00083 
00084 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00085 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ComputeMeshPartitioning(
00086     AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
00087     std::set<unsigned>& rNodesOwned,
00088     std::set<unsigned>& rHaloNodesOwned,
00089     std::set<unsigned>& rElementsOwned,
00090     std::vector<unsigned>& rProcessorsOffset)
00091 {
00093 
00094     if (mMetisPartitioning==PARMETIS_LIBRARY && !PetscTools::IsSequential())
00095     {
00096         /*
00097          *  With ParMetisLibraryNodePartitioning we compute the element partition first
00098          *  and then we work out the node ownership.
00099          */
00100         ParMetisLibraryNodePartitioning(rMeshReader, rElementsOwned, rNodesOwned, rHaloNodesOwned, rProcessorsOffset);
00101     }
00102     else
00103     {
00104         /*
00105          *  Otherwise we compute the node partition and then we workout element distribution
00106          */
00107         if (mMetisPartitioning==METIS_LIBRARY && !PetscTools::IsSequential())
00108         {
00109             MetisLibraryNodePartitioning(rMeshReader, rNodesOwned, rProcessorsOffset);
00110         }
00111         else if (mMetisPartitioning==PETSC_MAT_PARTITION && !PetscTools::IsSequential())
00112         {
00113             PetscMatrixPartitioning(rMeshReader, rNodesOwned, rProcessorsOffset);
00114         }
00115         else
00116         {
00117             DumbNodePartitioning(rMeshReader, rNodesOwned);
00118         }
00119 
00120         for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
00121         {
00122             ElementData element_data = rMeshReader.GetNextElementData();
00123 
00124             bool element_owned = false;
00125             std::set<unsigned> temp_halo_nodes;
00126 
00127             for (unsigned i=0; i<ELEMENT_DIM+1; i++)
00128             {
00129                 if (rNodesOwned.find(element_data.NodeIndices[i]) != rNodesOwned.end())
00130                 {
00131                     element_owned = true;
00132                     rElementsOwned.insert(element_number);
00133                 }
00134                 else
00135                 {
00136                     temp_halo_nodes.insert(element_data.NodeIndices[i]);
00137                 }
00138             }
00139 
00140             if (element_owned)
00141             {
00142                 rHaloNodesOwned.insert(temp_halo_nodes.begin(), temp_halo_nodes.end());
00143             }
00144         }
00145 
00146         if (mMetisPartitioning==PETSC_MAT_PARTITION && !PetscTools::IsSequential())
00147         {
00148             PetscTools::Barrier();
00149             if(PetscTools::AmMaster())
00150             {
00151                 Timer::PrintAndReset("Element and halo node assignation");
00152             }
00153         }        
00154         
00155     }
00156     rMeshReader.Reset();
00157 }
00158 
00159 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00160 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructFromMeshReader(
00161     AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader)
00162 {
00163     std::set<unsigned> nodes_owned;
00164     std::set<unsigned> halo_nodes_owned;
00165     std::set<unsigned> elements_owned;
00166     std::vector<unsigned> proc_offsets;//(PetscTools::GetNumProcs());
00167 
00168     this->mMeshFileBaseName = rMeshReader.GetMeshFileBaseName();
00169     mTotalNumElements = rMeshReader.GetNumElements();
00170     mTotalNumBoundaryElements = rMeshReader.GetNumFaces();
00171     mTotalNumNodes = rMeshReader.GetNumNodes();
00172 
00173 
00174     PetscTools::Barrier();
00175     Timer::Reset();
00176     ComputeMeshPartitioning(rMeshReader, nodes_owned, halo_nodes_owned, elements_owned, proc_offsets);
00177     PetscTools::Barrier();
00178     //Timer::Print("partitioning");
00179 
00180     // Reserve memory
00181     this->mElements.reserve(elements_owned.size());
00182     this->mNodes.reserve(nodes_owned.size());
00183 
00184     if ( rMeshReader.IsFileFormatBinary() )
00185     {
00186         std::vector<double> coords;
00187         // Binary : load only the nodes which are needed
00188         for (std::set<unsigned>::const_iterator it=nodes_owned.begin(); it!=nodes_owned.end(); it++)
00189         {
00190             //Loop over wholey-owned nodes
00191             unsigned global_node_index=*it;
00192             coords = rMeshReader.GetNode(global_node_index);
00193             RegisterNode(global_node_index);
00194             this->mNodes.push_back(new Node<SPACE_DIM>(global_node_index, coords, false));
00195         }
00196         for (std::set<unsigned>::const_iterator it=halo_nodes_owned.begin(); it!=halo_nodes_owned.end(); it++)
00197         {
00198             //Loop over halo-owned nodes
00199             unsigned global_node_index=*it;
00200             coords = rMeshReader.GetNode(global_node_index);
00201             RegisterHaloNode(global_node_index);
00202             mHaloNodes.push_back(new Node<SPACE_DIM>(global_node_index, coords, false));
00203         }
00204     }
00205     else
00206     {
00207         // Ascii : Sequentially load all the nodes and store those owned (or halo-owned) by the process
00208         for (unsigned node_index=0; node_index < mTotalNumNodes; node_index++)
00209         {
00210             std::vector<double> coords;
00212             coords = rMeshReader.GetNextNode();
00213 
00214             // The node is owned by the processor
00215             if (nodes_owned.find(node_index) != nodes_owned.end())
00216             {
00217                 RegisterNode(node_index);
00218                 this->mNodes.push_back(new Node<SPACE_DIM>(node_index, coords, false));
00219             }
00220 
00221             // The node is a halo node in this processor
00222             if (halo_nodes_owned.find(node_index) != halo_nodes_owned.end())
00223             {
00224                 RegisterHaloNode(node_index);
00225                 mHaloNodes.push_back(new Node<SPACE_DIM>(node_index, coords, false));
00226             }
00227         }
00228     }
00229 
00230     if ( rMeshReader.IsFileFormatBinary() )
00231     {
00232         // Binary format, we loop only over the elements we have been assigned
00233         for (std::set<unsigned>::const_iterator elem_it=elements_owned.begin(); elem_it!=elements_owned.end(); elem_it++)
00234         {
00235             unsigned global_element_index=*elem_it;
00236             ElementData element_data;
00237             element_data = rMeshReader.GetElementData(global_element_index);
00238 
00239             std::vector<Node<SPACE_DIM>*> nodes;
00240             for (unsigned j=0; j<ELEMENT_DIM+1; j++)
00241             {
00242                 //because we have populated mNodes and mHaloNodes above, we can now use this method, which should never throw
00243                 nodes.push_back(this->GetNodeOrHaloNode(element_data.NodeIndices[j]));
00244             }
00245 
00246             RegisterElement(global_element_index);
00247             Element<ELEMENT_DIM,SPACE_DIM>* p_element = new Element<ELEMENT_DIM,SPACE_DIM>(global_element_index, nodes);
00248             this->mElements.push_back(p_element);
00249 
00250             if (rMeshReader.GetNumElementAttributes() > 0)
00251             {
00252                 assert(rMeshReader.GetNumElementAttributes() == 1);
00253                 unsigned attribute_value = element_data.AttributeValue;
00254                 p_element->SetRegion(attribute_value);
00255             }
00256         }
00257     }
00258     else
00259     {
00260         // Load the elements owned by the processor
00261         for (unsigned element_index=0; element_index < mTotalNumElements; element_index++)
00262         {
00263             ElementData element_data;
00264 
00265             element_data = rMeshReader.GetNextElementData();
00266 
00267             // The element is owned by the processor
00268             if (elements_owned.find(element_index) != elements_owned.end())
00269             {
00270                 std::vector<Node<SPACE_DIM>*> nodes;
00271                 for (unsigned j=0; j<ELEMENT_DIM+1; j++)
00272                 {
00273                     //because we have populated mNodes and mHaloNodes above, we can now use this method, which should never throw
00274                     nodes.push_back(this->GetNodeOrHaloNode(element_data.NodeIndices[j]));
00275                 }
00276 
00277                 RegisterElement(element_index);
00278 
00279                 Element<ELEMENT_DIM,SPACE_DIM>* p_element = new Element<ELEMENT_DIM,SPACE_DIM>(element_index, nodes);
00280                 this->mElements.push_back(p_element);
00281 
00282                 if (rMeshReader.GetNumElementAttributes() > 0)
00283                 {
00284                     assert(rMeshReader.GetNumElementAttributes() == 1);
00285                     unsigned attribute_value = element_data.AttributeValue;
00286                     p_element->SetRegion(attribute_value);
00287                 }
00288             }
00289         }
00290     }
00291 
00292     // Boundary nodes and elements
00293     try
00294     {
00295         for (unsigned face_index=0; face_index<mTotalNumBoundaryElements; face_index++)
00296         {
00297             ElementData face_data = rMeshReader.GetNextFaceData();
00298             std::vector<unsigned> node_indices = face_data.NodeIndices;
00299 
00300             bool own = false;
00301 
00302             for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
00303             {
00304                 // if I own this node
00305                 if (mNodesMapping.find(node_indices[node_index]) != mNodesMapping.end())
00306                 {
00307                     own = true;
00308                     break;
00309                 }
00310             }
00311 
00312             if (!own)
00313             {
00314                 continue;
00315             }
00316 
00317             // Determine if this is a boundary face
00318             std::set<unsigned> containing_element_indices; // Elements that contain this face
00319             std::vector<Node<SPACE_DIM>*> nodes;
00320 
00321             for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
00322             {
00323                 //because we have populated mNodes and mHaloNodes above, we can now use this method,
00324                 //which SHOULD never throw (but it does).
00325                 try
00326                 {
00327                     nodes.push_back(this->GetNodeOrHaloNode(node_indices[node_index]));
00328                 }
00329                 catch (Exception &e)
00330                 {
00331                     std::stringstream message;
00332                     message << "Face does not appear in element file (Face " << face_index << " in "<<this->mMeshFileBaseName<< ")";
00333                     EXCEPTION(message.str().c_str());
00334                 }
00335             }
00336 
00337             // This is a boundary face
00338             // Ensure all its nodes are marked as boundary nodes
00339             for (unsigned j=0; j<nodes.size(); j++)
00340             {
00341                 if (!nodes[j]->IsBoundaryNode())
00342                 {
00343                     nodes[j]->SetAsBoundaryNode();
00344                     this->mBoundaryNodes.push_back(nodes[j]);
00345                 }
00346                 // Register the index that this bounday element will have with the node
00347                 nodes[j]->AddBoundaryElement(face_index);
00348             }
00349 
00350             RegisterBoundaryElement(face_index);
00351             BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>* p_boundary_element = new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(face_index, nodes);
00352             this->mBoundaryElements.push_back(p_boundary_element);
00353 
00354             if (rMeshReader.GetNumFaceAttributes() > 0)
00355             {
00356                 assert(rMeshReader.GetNumFaceAttributes() == 1);
00357                 unsigned attribute_value = face_data.AttributeValue;
00358                 p_boundary_element->SetRegion(attribute_value);
00359             }
00360         }
00361     }
00362     catch (Exception &e)
00363     {
00364         PetscTools::ReplicateException(true); //Bad face exception
00365         throw e;
00366     }
00367     //EXCEPTION("before deadlocking");
00368 //    std::cout << "before!" << std::flush <<std::endl;
00369     PetscTools::ReplicateException(false);
00370 //    std::cout << "went past this!" << std::flush <<std::endl;
00371 
00372     if (mMetisPartitioning != DUMB && !PetscTools::IsSequential())
00373     {
00374         assert(this->mNodesPermutation.size() != 0);
00375         // We reorder so that each process owns a contiguous set of the indices and we can then build a distributed vector factory.
00376         ReorderNodes();
00377 
00378         unsigned num_owned;
00379         unsigned rank = PetscTools::GetMyRank();
00380         if ( !PetscTools::AmTopMost() )
00381         {
00382             num_owned =  proc_offsets[rank+1]-proc_offsets[rank];
00383         }
00384         else
00385         {
00386             num_owned = mTotalNumNodes - proc_offsets[rank];
00387         }
00388 
00389         assert(!this->mpDistributedVectorFactory);
00390         this->mpDistributedVectorFactory = new DistributedVectorFactory(this->GetNumNodes(), num_owned);
00391     }
00392     else
00393     {
00394         // Dumb or sequential partition
00395         assert(this->mpDistributedVectorFactory);
00396     }
00397     rMeshReader.Reset();
00398 }
00399 
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalNodes() const
{
    // Number of nodes owned by this process (halo nodes live in mHaloNodes and are excluded)
    return this->mNodes.size();
}
00405 
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalElements() const
{
    // Number of elements stored on this process
    return this->mElements.size();
}
00411 
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalBoundaryElements() const
{
    // Number of boundary elements (faces) stored on this process
    return this->mBoundaryElements.size();
}
00417 
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumNodes() const
{
    // Total number of nodes across ALL processes (as read from the mesh file)
    return mTotalNumNodes;
}
00423 
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumAllNodes() const
{
    // Identical to GetNumNodes() here: this class keeps no deleted-node bookkeeping
    return mTotalNumNodes;
}
00429 
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumElements() const
{
    // Total number of elements across ALL processes (as read from the mesh file)
    return mTotalNumElements;
}
00435 
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
typename DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::PartitionType DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetPartitionType() const
{
    // The partitioning strategy actually in effect; may differ from the one requested
    // (forced to DUMB for 1D meshes or when a vector factory was supplied externally)
    return mMetisPartitioning;
}
00441 
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumBoundaryElements() const
{
    // Total number of boundary elements (faces) across ALL processes
    return mTotalNumBoundaryElements;
}
00447 
00448 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00449 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetHaloNodeIndices(std::vector<unsigned>& rHaloIndices) const
00450 {
00451     //Make sure the output vector is empty
00452     rHaloIndices.clear();
00453     for (unsigned i=0; i<mHaloNodes.size(); i++)
00454     {
00455         rHaloIndices.push_back(mHaloNodes[i]->GetIndex());
00456     }
00457 }
00458 
00459 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00460 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SetElementOwnerships(unsigned lo, unsigned hi)
00461 {
00462     // This method exists just to keep compatibility with TetrahedralMesh.
00463     // In parallel, you only create the data structures for the elements you have been assigned.
00464     // Therefore, all the local elements are owned by the processor (obviously...)
00465     // We don't care about "hi" and "lo"
00466     assert(hi>=lo);
00467     for (unsigned element_index=0; element_index<this->mElements.size(); element_index++)
00468     {
00469         Element<ELEMENT_DIM, SPACE_DIM>* p_element=this->mElements[element_index];
00470         p_element->SetOwnership(true);
00471     }
00472 }
00473 
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
bool DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfElement( unsigned elementIndex )
{
    // The base-class check throws when the element is not present locally;
    // in the distributed mesh that simply means this process does not own it.
    try
    {
        return(AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfElement(elementIndex));
    }
    catch(Exception& e)      // we don't own the element
    {
        return false;
    }
}
00486 
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
bool DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfBoundaryElement( unsigned faceIndex )
{
    // The base-class check throws when the face is not present locally;
    // in the distributed mesh that simply means this process does not own it.
    try
    {
        return(AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfBoundaryElement(faceIndex));
    }
    catch(Exception& e)      //  we don't own the face
    {
        return false;
    }
}
00499 
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterNode(unsigned index)
{
    // Map global node index -> local position; must be called immediately BEFORE the
    // node is pushed onto mNodes, since the local index recorded is the current size.
    mNodesMapping[index] = this->mNodes.size();
}
00505 
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterHaloNode(unsigned index)
{
    // Map global halo-node index -> local position; called immediately BEFORE the
    // node is pushed onto mHaloNodes (the recorded index is the current size).
    mHaloNodesMapping[index] = mHaloNodes.size();
}
00511 
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterElement(unsigned index)
{
    // Map global element index -> local position; called immediately BEFORE the
    // element is pushed onto mElements (the recorded index is the current size).
    mElementsMapping[index] = this->mElements.size();
}
00517 
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterBoundaryElement(unsigned index)
{
    // Map global face index -> local position; called immediately BEFORE the
    // boundary element is pushed onto mBoundaryElements.
    mBoundaryElementsMapping[index] = this->mBoundaryElements.size();
}
00523 
00524 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00525 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveNodeMapping(unsigned index) const
00526 {
00527     std::map<unsigned, unsigned>::const_iterator node_position = mNodesMapping.find(index);
00528 
00529     if (node_position == mNodesMapping.end())
00530     {
00531         std::stringstream message;
00532         message << "Requested node " << index << " does not belong to processor " << PetscTools::GetMyRank();
00533         EXCEPTION(message.str().c_str());
00534     }
00535     return node_position->second;
00536 }
00537 
00538 //template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00539 //unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveHaloNodeMapping(unsigned index)
00540 //{
00541 //    assert(mHaloNodesMapping.find(index) != mHaloNodesMapping.end());
00542 //    return mHaloNodesMapping[index];
00543 //}
00544 
00545 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00546 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveElementMapping(unsigned index) const
00547 {
00548     std::map<unsigned, unsigned>::const_iterator element_position = mElementsMapping.find(index);
00549 
00550     if (element_position == mElementsMapping.end())
00551     {
00552         std::stringstream message;
00553         message << "Requested element " << index << " does not belong to processor " << PetscTools::GetMyRank();
00554         EXCEPTION(message.str().c_str());
00555     }
00556 
00557     return element_position->second;
00558 }
00559 
00560 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00561 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveBoundaryElementMapping(unsigned index) const
00562 {
00563     std::map<unsigned, unsigned>::const_iterator boundary_element_position = mBoundaryElementsMapping.find(index);
00564 
00565     if (boundary_element_position == mBoundaryElementsMapping.end())
00566     {
00567         std::stringstream message;
00568         message << "Requested boundary element " << index << " does not belong to processor " << PetscTools::GetMyRank();
00569         EXCEPTION(message.str().c_str());
00570     }
00571 
00572     return boundary_element_position->second;
00573 }
00574 
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
Node<SPACE_DIM> * DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNodeOrHaloNode(unsigned index) const
{
    // Look up a node by global index, whether it is owned or merely a halo node.
    std::map<unsigned, unsigned>::const_iterator node_position;
    // First search the halo nodes
    if ((node_position=mHaloNodesMapping.find(index)) != mHaloNodesMapping.end())
    {
        return mHaloNodes[node_position->second];
    }
    // Then search the nodes owned by this process
    if ((node_position=mNodesMapping.find(index)) != mNodesMapping.end())
    {
        //Found an owned node
        return this->mNodes[node_position->second];
    }
    // The node is neither owned nor halo on this process
    std::stringstream message;
    message << "Requested node/halo " << index << " does not belong to processor " << PetscTools::GetMyRank();
    EXCEPTION(message.str().c_str());
}
00595 
00596 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00597 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::DumbNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
00598                                                                               std::set<unsigned>& rNodesOwned)
00599 {
00600     if (this->mpDistributedVectorFactory)
00601     {
00602         // A distribution is given by the factory
00603         if (this->mpDistributedVectorFactory->GetProblemSize() != mTotalNumNodes)
00604         {
00605             // Reset stuff
00606             this->mpDistributedVectorFactory = NULL;
00607             this->mTotalNumNodes = 0u;
00608             this->mTotalNumElements = 0u;
00609             this->mTotalNumBoundaryElements = 0u;
00610             EXCEPTION("The distributed vector factory size in the mesh doesn't match the total number of nodes.");
00611         }
00612     }
00613     else
00614     {
00615         this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes);
00616     }
00617     for (unsigned node_index = this->mpDistributedVectorFactory->GetLow();
00618          node_index < this->mpDistributedVectorFactory->GetHigh();
00619          node_index++)
00620     {
00621          rNodesOwned.insert(node_index);
00622     }
00623 }
00624 
00625 
00626 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00627 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::PetscMatrixPartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
00628                                                                                  std::set<unsigned>& rNodesOwned,
00629                                                                                  std::vector<unsigned>& rProcessorsOffset)
00630 {
00631     assert(!PetscTools::IsSequential());
00632     assert(ELEMENT_DIM==2 || ELEMENT_DIM==3); // Metis works with triangles and tetras
00633 
00634     unsigned num_nodes = rMeshReader.GetNumNodes();
00635     PetscInt num_procs = PetscTools::GetNumProcs();
00636     unsigned local_proc_index = PetscTools::GetMyRank();
00637 
00638     unsigned num_elements = rMeshReader.GetNumElements();
00639     unsigned num_local_elements = num_elements / num_procs;
00640     unsigned first_local_element = num_local_elements * local_proc_index;
00641     if (PetscTools::AmTopMost())
00642     {
00643         // Take the excess elements
00644         num_local_elements += num_elements - (num_local_elements * num_procs);
00645     }
00646 
00647     PetscTools::Barrier();
00648     Timer::Reset();    
00649 
00650     /*
00651      * Create PETSc matrix which will have 1 for adjacent nodes.
00652      */
00653     Mat connectivity_matrix;
00655     PetscTools::SetupMat(connectivity_matrix, num_nodes, num_nodes, 54);
00656     
00657     if ( ! rMeshReader.IsFileFormatBinary() )
00658     {        
00659         // Advance the file pointer to the first element I own
00660         for (unsigned element_index = 0; element_index < first_local_element; element_index++)
00661         {
00662             ElementData element_data = rMeshReader.GetNextElementData();
00663         }
00664     }
00665 
00666     // In the loop below we assume that there exist edges between any pair of nodes in an element. This is
00667     // a reasonable assumption for triangles and tetrahedra. This won't be the case for squares or hexahedra 
00668     // (or higher order elements). We allow ELEMENT_DIM smaller than SPACE_DIM in case this is a 2D mesh in
00669     // a 3D space. 
00670     assert(SPACE_DIM >= ELEMENT_DIM);
00671     
00672     for (unsigned element_index = 0; element_index < num_local_elements; element_index++)
00673     {
00674         ElementData element_data;
00675         
00676         if ( rMeshReader.IsFileFormatBinary() )
00677         {
00678             element_data = rMeshReader.GetElementData(first_local_element + element_index);
00679         }
00680         else
00681         {
00682             element_data = rMeshReader.GetNextElementData();
00683         }        
00684 
00685         for (unsigned i=0; i<ELEMENT_DIM+1; i++)
00686         {
00687             for (unsigned j=0; j<i; j++)
00688             {
00689                 unsigned row = element_data.NodeIndices[i];
00690                 unsigned col = element_data.NodeIndices[j];
00691                 MatSetValue(connectivity_matrix, row, col, 1.0, INSERT_VALUES);
00692                 MatSetValue(connectivity_matrix, col, row, 1.0, INSERT_VALUES);
00693             }
00694         }
00695     }
00697     MatAssemblyBegin(connectivity_matrix, MAT_FINAL_ASSEMBLY);
00698     MatAssemblyEnd(connectivity_matrix, MAT_FINAL_ASSEMBLY);
00699 
00700     PetscTools::Barrier();
00701     if(PetscTools::AmMaster())
00702     {
00703         Timer::PrintAndReset("Connectivity matrix assembly");
00704     }    
00705 
00706     rMeshReader.Reset();   
00707 
00708     PetscInt connectivity_matrix_lo;
00709     PetscInt connectivity_matrix_hi;
00710     MatGetOwnershipRange(connectivity_matrix, &connectivity_matrix_lo, &connectivity_matrix_hi);
00711 
00712     unsigned num_local_nodes = connectivity_matrix_hi - connectivity_matrix_lo;
00713 
00714     MatInfo matrix_info;
00715     MatGetInfo(connectivity_matrix, MAT_LOCAL, &matrix_info);
00716     unsigned local_num_nz = (unsigned) matrix_info.nz_used;
00717 
00718     size_t size = (num_local_nodes+1)*sizeof(PetscInt);
00719     void* ptr;
00720     PetscMalloc(size, &ptr);
00721     PetscInt* local_ia = (PetscInt*) ptr;
00722     size = local_num_nz*sizeof(PetscInt);
00723     PetscMalloc(size, &ptr);
00724     PetscInt* local_ja = (PetscInt*) ptr;
00725 
00726     PetscInt row_num_nz;
00727     const PetscInt* column_indices;
00728         
00729     local_ia[0]=0;
00730     for (PetscInt row_global_index=connectivity_matrix_lo; row_global_index<connectivity_matrix_hi; row_global_index++) 
00731     {
00732         MatGetRow(connectivity_matrix, row_global_index, &row_num_nz, &column_indices, NULL);
00733 
00734         unsigned row_local_index = row_global_index - connectivity_matrix_lo;       
00735         local_ia[row_local_index+1] = local_ia[row_local_index] + row_num_nz;        
00736         for (PetscInt col_index=0; col_index<row_num_nz; col_index++)
00737         {
00738            local_ja[local_ia[row_local_index] + col_index] =  column_indices[col_index];
00739         }        
00740 
00741         MatRestoreRow(connectivity_matrix, row_global_index, &row_num_nz,&column_indices, NULL);
00742     }
00743     
00744     MatDestroy(connectivity_matrix);
00745 
00746     // Convert to an adjacency matrix
00747     Mat adj_matrix;
00748     MatCreateMPIAdj(PETSC_COMM_WORLD, num_local_nodes, num_nodes, local_ia, local_ja, NULL, &adj_matrix);
00749 
00750     PetscTools::Barrier();
00751     if(PetscTools::AmMaster())
00752     {
00753         Timer::PrintAndReset("Adjacency matrix creation");
00754     }    
00755 
00756     // Get PETSc to call ParMETIS
00757     MatPartitioning part;
00758     MatPartitioningCreate(PETSC_COMM_WORLD, &part);
00759     MatPartitioningSetAdjacency(part, adj_matrix);
00760     MatPartitioningSetFromOptions(part);
00761     IS new_process_numbers;
00762     MatPartitioningApply(part, &new_process_numbers);
00763     MatPartitioningDestroy(part);
00764     
00766     MatDestroy(adj_matrix);
00767 
00768     PetscTools::Barrier();
00769     if(PetscTools::AmMaster())
00770     {
00771         Timer::PrintAndReset("PETSc-ParMETIS call");
00772     }    
00773                 
00774     // Figure out who owns what - processor offsets
00775     PetscInt* num_nodes_per_process = new PetscInt[num_procs];        
00776 #if (PETSC_VERSION_MAJOR == 3) //PETSc 3.x.x
00777     ISPartitioningCount(new_process_numbers, num_procs, num_nodes_per_process);
00778 #else
00779     ISPartitioningCount(new_process_numbers, num_nodes_per_process);
00780 #endif
00781        
00782     rProcessorsOffset.resize(num_procs);
00783     rProcessorsOffset[0] = 0;
00784     for (PetscInt i=1; i<num_procs; i++)
00785     {
00786         rProcessorsOffset[i] = rProcessorsOffset[i-1] + num_nodes_per_process[i-1];
00787     }
00788     unsigned my_num_nodes = num_nodes_per_process[local_proc_index];
00789     delete[] num_nodes_per_process;
00790     
00791     // Figure out who owns what - new node numbering
00792     IS new_global_node_indices;
00793     ISPartitioningToNumbering(new_process_numbers, &new_global_node_indices);
00794     
00795     // Index sets only give local information, we want global
00796     AO ordering;
00797     AOCreateBasicIS(new_global_node_indices, PETSC_NULL /* natural ordering */, &ordering);
00798     
00799     // The locally owned range under the new numbering
00800     PetscInt* local_range = new PetscInt[my_num_nodes];
00801     for (unsigned i=0; i<my_num_nodes; i++)
00802     {
00803         local_range[i] = rProcessorsOffset[local_proc_index] + i;
00804     }
00805     AOApplicationToPetsc(ordering, my_num_nodes, local_range);    
00806     //AOView(ordering, PETSC_VIEWER_STDOUT_WORLD);
00807         
00808     // Fill in rNodesOwned (TODO: do something smarter with iterators...)
00809     for (unsigned i=0; i<my_num_nodes; i++)
00810     {
00811         rNodesOwned.insert(local_range[i]);
00812     }   
00813     delete[] local_range;    
00814     
00815     /*
00816      *  Once we know the offsets we can compute the permutation vector
00817      */
00818     PetscInt* global_range = new PetscInt[num_nodes];
00819     for (unsigned i=0; i<num_nodes; i++)
00820     {
00821         global_range[i] = i;
00822     }
00823     AOPetscToApplication(ordering, num_nodes, global_range);
00824 
00825     this->mNodesPermutation.resize(num_nodes);
00826 
00827     for (unsigned node_index=0; node_index<num_nodes; node_index++)
00828     {
00829         this->mNodesPermutation[node_index] = global_range[node_index];
00830     }
00831     delete[] global_range;
00832 
00833     AODestroy(ordering);
00834     ISDestroy(new_process_numbers);
00835     ISDestroy(new_global_node_indices);
00836     
00837     PetscTools::Barrier();
00838     if(PetscTools::AmMaster())
00839     {
00840         Timer::PrintAndReset("PETSc-ParMETIS output manipulation");
00841     }    
00842 }
00843 
// Compute a node partition of the mesh by calling the serial METIS library.
// Only the master process reads the element connectivity and runs METIS; the
// resulting node-to-process assignment is then broadcast to all processes.
//
// On exit:
//  - rNodesOwned holds the global indices of the nodes assigned to this process;
//  - rProcessorsOffset[p] is the new global index of the first node owned by
//    process p (cumulative count of nodes owned by lower-ranked processes);
//  - this->mNodesPermutation maps each original node index to its new index,
//    so each process owns a contiguous range under the new numbering.
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::MetisLibraryNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
                                                                                  std::set<unsigned>& rNodesOwned,
                                                                                  std::vector<unsigned>& rProcessorsOffset)
{
    // Partitioning only makes sense when running on more than one process
    assert(!PetscTools::IsSequential());

    assert(ELEMENT_DIM==2 || ELEMENT_DIM==3); // Metis works with triangles and tetras

    int nn = rMeshReader.GetNumNodes();
    idxtype* npart = new idxtype[nn]; // node-to-process assignment, filled in by METIS on the master
    assert(npart != NULL);

    //Only the master process will access the element data and perform the partitioning
    if (PetscTools::AmMaster())
    {
        int ne = rMeshReader.GetNumElements();
        // Element connectivity flattened into the layout METIS expects:
        // (ELEMENT_DIM+1) node indices per element, stored contiguously.
        idxtype* elmnts = new idxtype[ne * (ELEMENT_DIM+1)];
        assert(elmnts != NULL);

        unsigned counter=0;
        for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
        {
            ElementData element_data = rMeshReader.GetNextElementData();

            for (unsigned i=0; i<ELEMENT_DIM+1; i++)
            {
                elmnts[counter++] = element_data.NodeIndices[i];
            }
        }
        rMeshReader.Reset(); // rewind the reader so later passes over the file start from the beginning

        int etype;

        switch (ELEMENT_DIM)
        {
            case 2:
                etype = 1; //1 is Metis speak for triangles
                break;
            case 3:
                etype = 2; //2 is Metis speak for tetrahedra
                break;
            default:
                NEVER_REACHED;
        }

        int numflag = 0; //0 means C-style numbering is assumed
        int nparts = PetscTools::GetNumProcs(); // one partition per process
        int edgecut;
        idxtype* epart = new idxtype[ne]; // element partition: computed by METIS but not used here
        assert(epart != NULL);

        Timer::Reset();
        METIS_PartMeshNodal(&ne, &nn, elmnts, &etype, &numflag, &nparts, &edgecut, epart, npart);//, wgetflag, vwgt);
        //Timer::Print("METIS call");

        delete[] elmnts;
        delete[] epart;
    }

    //Here's the new bottle-neck: share all the node ownership data
    //idxtype is normally int (see metis-4.0/Lib/struct.h 17-22)
    assert(sizeof(idxtype) == sizeof(int));
    MPI_Bcast(npart /*data*/, nn /*size*/, MPI_INT, 0 /*From Master*/, PETSC_COMM_WORLD);

    assert(rProcessorsOffset.size() == 0); // Making sure the vector is empty. After calling resize() only newly created memory will be initialised to 0.
    rProcessorsOffset.resize(PetscTools::GetNumProcs(), 0);

    for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
    {
        unsigned part_read = npart[node_index];

        // METIS output says I own this node
        if (part_read == PetscTools::GetMyRank())
        {
            rNodesOwned.insert(node_index);
        }

        // Offset is defined as the first node owned by a processor. We compute it incrementally.
        // i.e. if node_index belongs to proc 3 (of 6) we have to shift the processors 4, 5, and 6
        // offset a position.
        for (unsigned proc=part_read+1; proc<PetscTools::GetNumProcs(); proc++)
        {
            rProcessorsOffset[proc]++;
        }
    }

    /*
     *  Once we know the offsets we can compute the permutation vector
     */
    // local_index[p] counts how many of process p's nodes have been renumbered so far
    std::vector<unsigned> local_index(PetscTools::GetNumProcs(), 0);

    this->mNodesPermutation.resize(this->GetNumNodes());

    for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
    {
        unsigned part_read = npart[node_index];

        // New index = start of the owner's contiguous range + position within that range
        this->mNodesPermutation[node_index] = rProcessorsOffset[part_read] + local_index[part_read];

        local_index[part_read]++;
    }

    delete[] npart;
}
00949 
00950 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00951 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ReorderNodes()
00952 {
00953     assert(!PetscTools::IsSequential());
00954 
00955     // Need to rebuild global-local maps
00956     mNodesMapping.clear();
00957     mHaloNodesMapping.clear();
00958 
00959     // Update indices
00960     for (unsigned index=0; index<this->mNodes.size(); index++)
00961     {
00962         unsigned old_index = this->mNodes[index]->GetIndex();
00963         unsigned new_index = this->mNodesPermutation[old_index];
00964 
00965         this->mNodes[index]->SetIndex(new_index);
00966         mNodesMapping[new_index] = index;
00967     }
00968 
00969     for (unsigned index=0; index<mHaloNodes.size(); index++)
00970     {
00971         unsigned old_index = mHaloNodes[index]->GetIndex();
00972         unsigned new_index = this->mNodesPermutation[old_index];
00973 
00974         mHaloNodes[index]->SetIndex(new_index);
00975         mHaloNodesMapping[new_index] = index;
00976     }
00977 }
00978 
// Construct a 1D mesh of 'width' elements spanning [0, width], distributed
// across processes with a dumb (contiguous-slab) partition. Each internal
// process boundary gets one halo node so that every element is fully
// constructible on the process that owns it.
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructLinearMesh(unsigned width)
{
    assert(ELEMENT_DIM == 1);

     //Check that there are enough nodes to make the parallelisation worthwhile
    if (width==0)
    {
        EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
    }
    //Use dumb partition so that archiving doesn't permute anything
    mMetisPartitioning=DUMB;
    mTotalNumNodes=width+1;
    mTotalNumBoundaryElements=2u;
    mTotalNumElements=width;

    //Use DistributedVectorFactory to make a dumb partition of the nodes
    assert(!this->mpDistributedVectorFactory);
    this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes);
    if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
    {
        //It's a short mesh and this process owns no nodes
        return;
    }

    /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
     * higher numbered process may have dropped out of this construction altogether
     * (because it has no local ownership)
     */
    bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);

    unsigned lo_node=this->mpDistributedVectorFactory->GetLow();
    unsigned hi_node=this->mpDistributedVectorFactory->GetHigh();
    if (!PetscTools::AmMaster())
    {
        //Allow for a halo node
        lo_node--;
    }
    if (!am_top_most)
    {
        //Allow for a halo node
        hi_node++;
    }
    Node<SPACE_DIM>* p_old_node=NULL;
    for (unsigned node_index=lo_node; node_index<hi_node; node_index++)
    {
        // create node or halo-node; a node is a boundary node iff it is an end of the line
        Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(node_index, node_index==0 || node_index==width, node_index);
        if (node_index<this->mpDistributedVectorFactory->GetLow() ||
            node_index==this->mpDistributedVectorFactory->GetHigh() )
        {
            //Beyond left or right it's a halo node
            RegisterHaloNode(node_index);
            mHaloNodes.push_back(p_node);
        }
        else
        {
            RegisterNode(node_index);
            this->mNodes.push_back(p_node); // create node

            //A boundary face has to be wholly owned by the process
            //Since, when ELEMENT_DIM>1, we have *at least* one boundary node as a non-halo
            if (node_index==0) // create left boundary node and boundary element
            {
                this->mBoundaryNodes.push_back(p_node);
                RegisterBoundaryElement(0);
                this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(0, p_node) );
            }
            if (node_index==width) // create right boundary node and boundary element
            {
                this->mBoundaryNodes.push_back(p_node);
                RegisterBoundaryElement(1);
                this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(1, p_node) );
            }
            }
        if (node_index>lo_node) // create element
        {
            // Element i joins node i to node i+1; it is created when the right-hand node appears
            std::vector<Node<SPACE_DIM>*> nodes;
            nodes.push_back(p_old_node);
            nodes.push_back(p_node);
            RegisterElement(node_index-1);
            this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(node_index-1, nodes) );
        }
        //Keep track of the node which we've just created
        p_old_node=p_node;
    }
}
01066 
// Construct a 2D rectangular mesh of (width x height) unit squares, each split
// into two triangles, distributed across processes in horizontal slabs of
// complete rows (with one halo row at each internal slab boundary).
// If 'stagger' is true the diagonal direction alternates in a checkerboard
// pattern; otherwise every square is split the same way.
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructRectangularMesh(unsigned width, unsigned height, bool stagger)
{
    assert(SPACE_DIM == 2);
    assert(ELEMENT_DIM == 2);
    //Check that there are enough nodes to make the parallelisation worthwhile
    if (height==0)
    {
        EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
    }
    //Use dumb partition so that archiving doesn't permute anything
    mMetisPartitioning=DUMB;

    mTotalNumNodes=(width+1)*(height+1);
    mTotalNumBoundaryElements=(width+height)*2;
    mTotalNumElements=width*height*2;

    //Use DistributedVectorFactory to make a dumb partition of space
    DistributedVectorFactory y_partition(height+1);
    unsigned lo_y = y_partition.GetLow();
    unsigned hi_y = y_partition.GetHigh();
    //Dumb partition of nodes has to be such that each process gets complete slices
    assert(!this->mpDistributedVectorFactory);
    this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes, (width+1)*y_partition.GetLocalOwnership());
    if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
    {
        //It's a short mesh and this process owns no nodes
        return;
    }
    /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
     * higher numbered process may have dropped out of this construction altogether
     * (because it has no local ownership)
     */
    bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);


    if (!PetscTools::AmMaster())
    {
        //Allow for a halo node
        lo_y--;
    }
    if (!am_top_most)
    {
        //Allow for a halo node
        hi_y++;
    }

    //Construct the nodes (owned rows plus the halo rows immediately outside them)
    for (unsigned j=lo_y; j<hi_y; j++)
    {
        for (unsigned i=0; i<width+1; i++)
        {
            bool is_boundary=false;
            if (i==0 || j==0 || i==width || j==height)
            {
                is_boundary=true;
            }
            unsigned global_node_index=((width+1)*(j) + i); //Verified from sequential
            Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, is_boundary, i, j);
            if (j<y_partition.GetLow() || j==y_partition.GetHigh() )
            {
                //Beyond left or right it's a halo node
                RegisterHaloNode(global_node_index);
                mHaloNodes.push_back(p_node);
            }
            else
            {
                RegisterNode(global_node_index);
                this->mNodes.push_back(p_node);
            }
            if (is_boundary)
            {
                this->mBoundaryNodes.push_back(p_node);
            }
        }
    }

    //Construct the boundary elements
    //Boundary element indices run: top edge [0,width), right edge, bottom edge, left edge
    unsigned belem_index;
    //Top
    if (am_top_most)
    {
       for (unsigned i=0; i<width; i++)
       {
            std::vector<Node<SPACE_DIM>*> nodes;
            nodes.push_back(GetNodeOrHaloNode( height*(width+1)+i ));
            nodes.push_back(GetNodeOrHaloNode( height*(width+1)+i+1 ));
            belem_index=i;
            RegisterBoundaryElement(belem_index);
            this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
        }
    }

    //Right
    for (unsigned j=lo_y+1; j<hi_y; j++)
    {
        std::vector<Node<SPACE_DIM>*> nodes;
        nodes.push_back(GetNodeOrHaloNode( (width+1)*(j+1)-1 ));
        nodes.push_back(GetNodeOrHaloNode( (width+1)*j-1 ));
        belem_index=width+j-1;
        RegisterBoundaryElement(belem_index);
        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
    }

    //Bottom
    if (PetscTools::AmMaster())
    {
        for (unsigned i=0; i<width; i++)
        {
            std::vector<Node<SPACE_DIM>*> nodes;
            nodes.push_back(GetNodeOrHaloNode( i+1 ));
            nodes.push_back(GetNodeOrHaloNode( i ));
            belem_index=width+height+i;
            RegisterBoundaryElement(belem_index);
            this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
        }
    }

    //Left
    for (unsigned j=lo_y; j<hi_y-1; j++)
    {
        std::vector<Node<SPACE_DIM>*> nodes;
        nodes.push_back(GetNodeOrHaloNode( (width+1)*(j+1) ));
        nodes.push_back(GetNodeOrHaloNode( (width+1)*(j) ));
        belem_index=2*width+height+j;
        RegisterBoundaryElement(belem_index);
        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
    }


    //Construct the elements: each unit square (i,j) yields an upper and a lower triangle
    unsigned elem_index;
    for (unsigned j=lo_y; j<hi_y-1; j++)
    {
        for (unsigned i=0; i<width; i++)
        {
            unsigned parity=(i+(height-j))%2;//Note that parity is measured from the top-left (not bottom left) for historical reasons
            unsigned nw=(j+1)*(width+1)+i; //ne=nw+1
            unsigned sw=(j)*(width+1)+i;   //se=sw+1
            std::vector<Node<SPACE_DIM>*> upper_nodes;
            upper_nodes.push_back(GetNodeOrHaloNode( nw ));
            upper_nodes.push_back(GetNodeOrHaloNode( nw+1 ));
            if (stagger==false  || parity == 1)
            {
                upper_nodes.push_back(GetNodeOrHaloNode( sw+1 ));
            }
            else
            {
                upper_nodes.push_back(GetNodeOrHaloNode( sw ));
            }
            elem_index=2*(j*width+i);
            RegisterElement(elem_index);
            this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index,upper_nodes));
            std::vector<Node<SPACE_DIM>*> lower_nodes;
            lower_nodes.push_back(GetNodeOrHaloNode( sw+1 ));
            lower_nodes.push_back(GetNodeOrHaloNode( sw ));
            if (stagger==false  ||parity == 1)
            {
                lower_nodes.push_back(GetNodeOrHaloNode( nw ));
            }
            else
            {
                lower_nodes.push_back(GetNodeOrHaloNode( nw+1 ));
            }
            elem_index++;
            RegisterElement(elem_index);
            this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index,lower_nodes));
        }
    }

}
01238 
01239 
// Construct a 3D cuboid mesh of (width x height x depth) unit cubes, each
// split into 6 tetrahedra, distributed across processes in horizontal slabs
// of complete z-slices (one halo slice at each internal slab boundary).
// Boundary faces of the cuboid are tessellated into pairs of triangles.
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructCuboid(unsigned width,
        unsigned height,
        unsigned depth)
{
    assert(SPACE_DIM == 3);
    assert(ELEMENT_DIM == 3);
    //Check that there are enough nodes to make the parallelisation worthwhile
    if (depth==0)
    {
        EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
    }

    //Use dumb partition so that archiving doesn't permute anything
    mMetisPartitioning=DUMB;

    mTotalNumNodes=(width+1)*(height+1)*(depth+1);
    mTotalNumBoundaryElements=((width*height)+(width*depth)+(height*depth))*4;//*2 for top-bottom, *2 for tessellating each unit square
    mTotalNumElements=width*height*depth*6;

    //Use DistributedVectorFactory to make a dumb partition of space
    DistributedVectorFactory z_partition(depth+1);
    unsigned lo_z = z_partition.GetLow();
    unsigned hi_z = z_partition.GetHigh();

    //Dumb partition of nodes has to be such that each process gets complete slices
    assert(!this->mpDistributedVectorFactory);
    this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes, (width+1)*(height+1)*z_partition.GetLocalOwnership());
    if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
    {
        //It's a short mesh and this process owns no nodes
       return;
    }
    /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
     * higher numbered process may have dropped out of this construction altogether
     * (because it has no local ownership)
     */
    bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);



    if (!PetscTools::AmMaster())
    {
        //Allow for a halo node
        lo_z--;
    }
    if (!am_top_most)
    {
        //Allow for a halo node
        hi_z++;
    }

    //Construct the nodes (owned z-slices plus the halo slices just outside them)
    unsigned global_node_index;
    for (unsigned k=lo_z; k<hi_z; k++)
    {
        for (unsigned j=0; j<height+1; j++)
        {
            for (unsigned i=0; i<width+1; i++)
            {
                bool is_boundary = false;
                if (i==0 || j==0 || k==0 || i==width || j==height || k==depth)
                {
                    is_boundary = true;
                }
                global_node_index = (k*(height+1)+j)*(width+1)+i;

                Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, is_boundary, i, j, k);

                if (k<z_partition.GetLow() || k==z_partition.GetHigh() )
                {
                    //Beyond left or right it's a halo node
                    RegisterHaloNode(global_node_index);
                    mHaloNodes.push_back(p_node);
                }
                else
                {
                    RegisterNode(global_node_index);
                    this->mNodes.push_back(p_node);
                }

                if (is_boundary)
                {
                    this->mBoundaryNodes.push_back(p_node);
                }
            }
        }
    }

    // Construct the elements

    // Local node numbering of a unit cube's 8 corners (x fastest, then y, then z);
    // each row lists the 4 corners of one of the 6 tetrahedra the cube is split into.
    unsigned element_nodes[6][4] = {{0, 1, 5, 7}, {0, 1, 3, 7},
                                        {0, 2, 3, 7}, {0, 2, 6, 7},
                                        {0, 4, 6, 7}, {0, 4, 5, 7}};
    std::vector<Node<SPACE_DIM>*> tetrahedra_nodes;

    for (unsigned k=lo_z; k<hi_z-1; k++)
    {
        unsigned belem_index = 0;
        if (k != 0)
        {
            // height*width squares on upper face, k layers of 2*height+2*width square around
            belem_index =   2*(height*width+k*2*(height+width));
        }

        for (unsigned j=0; j<height; j++)
        {
            for (unsigned i=0; i<width; i++)
            {
                // Compute the nodes' index
                unsigned global_node_indices[8];
                unsigned local_node_index = 0;

                for (unsigned z = 0; z < 2; z++)
                {
                    for (unsigned y = 0; y < 2; y++)
                    {
                        for (unsigned x = 0; x < 2; x++)
                        {
                            global_node_indices[local_node_index] = i+x+(width+1)*(j+y+(height+1)*(k+z));

                            local_node_index++;
                        }
                    }
                }

                for (unsigned m = 0; m < 6; m++)
                {
                    // Tetrahedra #m

                    tetrahedra_nodes.clear();

                    for (unsigned n = 0; n < 4; n++)
                    {
                        tetrahedra_nodes.push_back(GetNodeOrHaloNode( global_node_indices[element_nodes[m][n]] ));
                    }
                    unsigned elem_index = 6 * ((k*height+j)*width+i)+m;
                    RegisterElement(elem_index);
                    this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index, tetrahedra_nodes));
                }

                //Are we at a boundary? Each exterior face of the cube adds two triangles.
                std::vector<Node<SPACE_DIM>*> triangle_nodes;

                if (i == 0) //low face at x==0
                {
                    triangle_nodes.clear();
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
                    RegisterBoundaryElement(belem_index);
                    this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                    triangle_nodes.clear();
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
                    RegisterBoundaryElement(belem_index);
                    this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                }
                if (i == width-1) //high face at x=width
                {
                    triangle_nodes.clear();
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
                    RegisterBoundaryElement(belem_index);
                    this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                    triangle_nodes.clear();
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
                    RegisterBoundaryElement(belem_index);
                    this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                }
                if (j == 0) //low face at y==0
                {
                    triangle_nodes.clear();
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
                    RegisterBoundaryElement(belem_index);
                    this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                    triangle_nodes.clear();
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
                    RegisterBoundaryElement(belem_index);
                    this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                }
                if (j == height-1) //high face at y=height
                {
                    triangle_nodes.clear();
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
                    RegisterBoundaryElement(belem_index);
                    this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                    triangle_nodes.clear();
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
                    RegisterBoundaryElement(belem_index);
                    this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                }
                if (k == 0) //low face at z==0
                {
                    triangle_nodes.clear();
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
                    RegisterBoundaryElement(belem_index);
                    this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                    triangle_nodes.clear();
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
                    RegisterBoundaryElement(belem_index);
                    this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                }
                if (k == depth-1) //high face at z=depth
                {
                    triangle_nodes.clear();
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
                    RegisterBoundaryElement(belem_index);
                    this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                    triangle_nodes.clear();
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
                    triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
                    RegisterBoundaryElement(belem_index);
                    this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                }
            }//i
        }//j
    }//k
}
01478 
01479 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
01480 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Scale(const double xFactor, const double yFactor, const double zFactor)
01481 {
01482     //Base class scale (scales node positions)
01483     AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Scale(xFactor, yFactor, zFactor);
01484     //Scales halos
01485     for (unsigned i=0; i<mHaloNodes.size(); i++)
01486     {
01487         c_vector<double, SPACE_DIM>& r_location = mHaloNodes[i]->rGetModifiableLocation();
01488         if (SPACE_DIM>=3)
01489         {
01490             r_location[2] *= zFactor;
01491         }
01492         if (SPACE_DIM>=2)
01493         {
01494             r_location[1] *= yFactor;
01495         }
01496         r_location[0] *= xFactor;
01497     }
01498 
01499 }
01500 
01501 
/**
 * Partition the mesh across processes using ParMETIS.
 *
 * Builds a distributed dual graph of the mesh with ParMETIS_V3_Mesh2Dual,
 * partitions it with ParMETIS_V3_PartKway, and then derives from the element
 * partition: the set of elements owned locally, the set of nodes owned
 * locally, the halo nodes needed by local elements, the per-process node
 * offsets, and the global node permutation (this->mNodesPermutation).
 *
 * @param rMeshReader       reader for the mesh being partitioned (reset several times)
 * @param rElementsOwned    filled with global indices of elements this process owns
 * @param rNodesOwned       filled with global indices of nodes this process owns
 * @param rHaloNodesOwned   filled with global indices of halo (non-owned, needed) nodes
 * @param rProcessorsOffset must be empty on entry; filled with the index of the
 *                          first node owned by each process in the permuted numbering
 */
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ParMetisLibraryNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
                                                                                       std::set<unsigned>& rElementsOwned,
                                                                                       std::set<unsigned>& rNodesOwned,
                                                                                       std::set<unsigned>& rHaloNodesOwned,
                                                                                       std::vector<unsigned>& rProcessorsOffset)
{
    // ParMETIS partitioning only makes sense with more than one process
    assert(!PetscTools::IsSequential());
    assert(ELEMENT_DIM==2 || ELEMENT_DIM==3); // Metis works with triangles and tetras

    const unsigned num_elements = rMeshReader.GetNumElements();
    const unsigned num_procs = PetscTools::GetNumProcs();
    const unsigned local_proc_index = PetscTools::GetMyRank();

    /*
     *  Work out initial element distribution: a near-even block split of the
     *  elements over processes, in ParMETIS's "vtxdist" format
     *  (element_distribution[p] = first element of process p; last entry = num_elements).
     *  The remainder of num_elements/num_procs goes to the last process.
     */
    // NOTE(review): variable-length arrays are a GCC extension, not standard C++ — TODO confirm portability requirements
    idxtype element_distribution[num_procs+1];
    idxtype element_count[num_procs];

    element_distribution[0]=0;

    for (unsigned proc_index=1; proc_index<num_procs; proc_index++)
    {
        element_distribution[proc_index] = element_distribution[proc_index-1] + num_elements/num_procs;
        element_count[proc_index-1] = element_distribution[proc_index] - element_distribution[proc_index-1];
    }

    element_distribution[num_procs] = num_elements;
    element_count[num_procs-1] = element_distribution[num_procs] - element_distribution[num_procs-1];

    /*
     *  Create distributed mesh data structure: the local elements' connectivity
     *  in ParMETIS's CSR-like (eptr, eind) format.
     */
    unsigned first_local_element = element_distribution[local_proc_index];
    unsigned last_plus_one_element = element_distribution[local_proc_index+1];
    unsigned num_local_elements = last_plus_one_element - first_local_element;

    idxtype* eind = new idxtype[num_local_elements*(ELEMENT_DIM+1)];
    idxtype* eptr = new idxtype[num_local_elements+1];

    if ( ! rMeshReader.IsFileFormatBinary() )
    {
        // Advance the file pointer to the first element I own.
        // (ASCII readers are sequential-access only; each call consumes one record.)
        for (unsigned element_index = 0; element_index < first_local_element; element_index++)
        {
            ElementData element_data = rMeshReader.GetNextElementData();
        }
    }

    // Pack each local element's node indices into eind; eptr[e] is the start of element e's nodes
    unsigned counter=0;
    for (unsigned element_index = 0; element_index < num_local_elements; element_index++)
    {
        ElementData element_data;
        
        if ( rMeshReader.IsFileFormatBinary() )
        {
            // Binary format supports random access by global element index
            element_data = rMeshReader.GetElementData(first_local_element + element_index);
        }
        else
        {
            element_data = rMeshReader.GetNextElementData();
        }        

        eptr[element_index] = counter;
        for (unsigned i=0; i<ELEMENT_DIM+1; i++)
        {
            eind[counter++] = element_data.NodeIndices[i];
        }

    }
    eptr[num_local_elements] = counter;

    rMeshReader.Reset();

    int numflag = 0; // METIS speak for C-style numbering
    int ncommonnodes = 3; // Connectivity degree. Manual recommends 3 for meshes made exclusively of tetrahedra.
    MPI_Comm communicator = PETSC_COMM_WORLD;

    // Dual graph output, allocated by ParMETIS (hence freed with free(), not delete[])
    idxtype* xadj;
    idxtype* adjncy;

    Timer::Reset();
    ParMETIS_V3_Mesh2Dual(element_distribution, eptr, eind,
                          &numflag, &ncommonnodes, &xadj, &adjncy, &communicator);
    //Timer::Print("ParMETIS Mesh2Dual");

    // Connectivity arrays are no longer needed once the dual graph exists
    delete[] eind;
    delete[] eptr;

    int weight_flag = 0; // unweighted graph
    int n_constrains = 0; // number of weights that each vertex has (number of balance constrains)
    int n_subdomains = PetscTools::GetNumProcs();
    int options[3]; // extra options
    options[0] = 0; // ignore extra options
    int edgecut;

    // Partition assignment for the locally-held dual-graph vertices (i.e. local elements)
    idxtype* local_partition = new idxtype[num_local_elements];

/*
 *  In order to use ParMETIS_V3_PartGeomKway, we need to sort out how to compute the coordinates of the
 *  centers of each element efficiently.
 *
 *  In the meantime use ParMETIS_V3_PartKway.
 */
//    int n_dimensions = ELEMENT_DIM;
//    float node_coordinates[num_local_elements*SPACE_DIM];
//
//    ParMETIS_V3_PartGeomKway(element_distribution, xadj, adjncy, NULL, NULL, &weight_flag, &numflag,
//                             &n_dimensions, node_coordinates, &n_constrains, &n_subdomains, NULL, NULL,
//                             options, &edgecut, local_partition, &communicator);

    Timer::Reset();
    ParMETIS_V3_PartKway(element_distribution, xadj, adjncy, NULL, NULL, &weight_flag, &numflag,
                         &n_constrains, &n_subdomains, NULL, NULL,
                         options, &edgecut, local_partition, &communicator);
    //Timer::Print("ParMETIS PartKway");


    // Gather everyone's local partition into the full element->process map.
    // element_count/element_distribution double as the recvcounts/displs arrays.
    idxtype* global_element_partition = new idxtype[num_elements];

    // NOTE(review): assumes idxtype is int so MPI_INT matches — TODO confirm against the ParMETIS build configuration
    MPI_Allgatherv(local_partition, num_local_elements, MPI_INT,
                   global_element_partition, element_count, element_distribution, MPI_INT, PETSC_COMM_WORLD);

    delete[] local_partition;

    // Record which elements ParMETIS assigned to this process
    for(unsigned elem_index=0; elem_index<num_elements; elem_index++)
    {
        if ((unsigned) global_element_partition[elem_index] == local_proc_index)
        {
            rElementsOwned.insert(elem_index);
        }
    }

    rMeshReader.Reset();
    // xadj/adjncy were malloc'd inside ParMETIS, so release them with free()
    free(xadj);
    free(adjncy);

    unsigned num_nodes = rMeshReader.GetNumNodes();

    //unsigned global_node_partition[num_nodes]; // initialised to UNASSIGNED (do #define UNASSIGNED -1
    std::vector<unsigned> global_node_partition;
    global_node_partition.resize(num_nodes, UNASSIGNED_NODE);

    assert(rProcessorsOffset.size() == 0); // Making sure the vector is empty. After calling resize() only newly created memory will be initialised to 0.
    rProcessorsOffset.resize(PetscTools::GetNumProcs(), 0);


    /*
     *  Work out node distribution based on initial element distribution returned by ParMETIS:
     *  each node is claimed by the owner of the first element (in access order) containing it.
     *
     *  In this loop we handle 4 different data structures:
     *      global_node_partition and rProcessorsOffset are global,
     *      rNodesOwned and rHaloNodesOwned are local.
     */
    
    std::vector<unsigned> element_access_order;

    if ( rMeshReader.IsFileFormatBinary() )
    {     
        // Random access is available, so visit elements in a fixed pseudo-random
        // order (seed 0 keeps it identical on every process).
        RandomNumberGenerator* p_gen = RandomNumberGenerator::Instance();
        p_gen->Reseed(0);
        p_gen->Shuffle(mTotalNumElements,element_access_order);
    }
    else
    {
        // ASCII readers force sequential order: 0, 1, 2, ...
        for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
        {
            element_access_order.push_back(element_number);
        }
    }
    
    
    for (unsigned element_count = 0; element_count < mTotalNumElements; element_count++)
    {
        unsigned element_number = element_access_order[element_count];
        unsigned element_owner = global_element_partition[element_number];
        
        ElementData element_data;
        
        if ( rMeshReader.IsFileFormatBinary() )
        {
            element_data = rMeshReader.GetElementData(element_number);
        }
        else
        {
            element_data = rMeshReader.GetNextElementData();
        }        

        for (unsigned i=0; i<ELEMENT_DIM+1; i++)
        {
            /*
             *  For each node in this element, check whether it hasn't been assigned to another processor yet.
             *  If so, assign it to the owner the element. Otherwise, consider it halo.
             */
            if( global_node_partition[element_data.NodeIndices[i]] == UNASSIGNED_NODE )
            {
                if (element_owner == local_proc_index)
                {
                    rNodesOwned.insert(element_data.NodeIndices[i]);
                }

                global_node_partition[element_data.NodeIndices[i]] = element_owner;

                // Offset is defined as the first node owned by a processor. We compute it incrementally.
                // i.e. if node_index belongs to proc 3 (of 6) we have to shift the processors 4, 5, and 6
                // offset a position.
                for (unsigned proc=element_owner+1; proc<PetscTools::GetNumProcs(); proc++)
                {
                    rProcessorsOffset[proc]++;
                }
            }
            else
            {
                // Node already belongs to some process; if we own this element but
                // not the node, it becomes a halo node for us.
                if (element_owner == local_proc_index)
                {
                    //if (rNodesOwned.find(element_data.NodeIndices[i]) == rNodesOwned.end())
                    if (global_node_partition[element_data.NodeIndices[i]] != local_proc_index)
                    {
                        rHaloNodesOwned.insert(element_data.NodeIndices[i]);
                    }
                }
            }
        }
    }

    delete[] global_element_partition;

    /*
     *  Refine element distribution. Add extra elements that parMETIS didn't consider initially but
     *  include any node owned by the processor. This ensures that all the system matrix rows are
     *  assembled locally.
     */
    rMeshReader.Reset();

    for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
    {
        ElementData element_data = rMeshReader.GetNextElementData();

        bool element_owned = false;
        std::set<unsigned> temp_halo_nodes;

        for (unsigned i=0; i<ELEMENT_DIM+1; i++)
        {
            if (rNodesOwned.find(element_data.NodeIndices[i]) != rNodesOwned.end())
            {
                element_owned = true;
                rElementsOwned.insert(element_number);
            }
            else
            {
                temp_halo_nodes.insert(element_data.NodeIndices[i]);
            }
        }

        // Only elements touching an owned node contribute halo nodes
        if (element_owned)
        {
            rHaloNodesOwned.insert(temp_halo_nodes.begin(), temp_halo_nodes.end());
        }
    }

    rMeshReader.Reset();

    /*
     *  Once we know the offsets we can compute the permutation vector:
     *  node node_index maps to rProcessorsOffset[owner] + (its rank among the
     *  owner's nodes, counted in original-index order).
     */
    std::vector<unsigned> local_index(PetscTools::GetNumProcs(), 0);

    this->mNodesPermutation.resize(this->GetNumNodes());

    for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
    {
        unsigned partition = global_node_partition[node_index];
        assert(partition!=UNASSIGNED_NODE);

        this->mNodesPermutation[node_index] = rProcessorsOffset[partition] + local_index[partition];

        local_index[partition]++;
    }

}
01783 
01784 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
01785 ChasteCuboid<SPACE_DIM> DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateBoundingBox() const
01786 {
01787 
01788     ChasteCuboid<SPACE_DIM> my_box=AbstractMesh<ELEMENT_DIM, SPACE_DIM>::CalculateBoundingBox();
01789     ChastePoint<SPACE_DIM> my_minimum_point=my_box.rGetLowerCorner();
01790     ChastePoint<SPACE_DIM> my_maximum_point=my_box.rGetUpperCorner();
01791     
01792     c_vector<double, SPACE_DIM> global_minimum_point;
01793     c_vector<double, SPACE_DIM> global_maximum_point;
01794     MPI_Allreduce(&my_minimum_point.rGetLocation()[0], &global_minimum_point[0], SPACE_DIM, MPI_DOUBLE, MPI_MIN, PETSC_COMM_WORLD);
01795     MPI_Allreduce(&my_maximum_point.rGetLocation()[0], &global_maximum_point[0], SPACE_DIM, MPI_DOUBLE, MPI_MAX, PETSC_COMM_WORLD);
01796 
01797 
01798     ChastePoint<SPACE_DIM> min(global_minimum_point);
01799     ChastePoint<SPACE_DIM> max(global_maximum_point);
01800 
01801     return ChasteCuboid<SPACE_DIM>(min, max);
01802 }
01803 
// Explicit instantiation

// Instantiate the template for every (element dimension, space dimension)
// combination supported: 1D/2D/3D meshes, optionally embedded in a
// higher-dimensional space.
template class DistributedTetrahedralMesh<1,1>;
template class DistributedTetrahedralMesh<1,2>;
template class DistributedTetrahedralMesh<1,3>;
template class DistributedTetrahedralMesh<2,2>;
template class DistributedTetrahedralMesh<2,3>;
template class DistributedTetrahedralMesh<3,3>;


// Serialization for Boost >= 1.36
#include "SerializationExportWrapperForCpp.hpp"
EXPORT_TEMPLATE_CLASS_ALL_DIMS(DistributedTetrahedralMesh)

Generated on Mon Nov 1 12:35:23 2010 for Chaste by  doxygen 1.5.5