DistributedTetrahedralMesh.cpp

00001 /*
00002 
00003 Copyright (C) University of Oxford, 2005-2011
00004 
00005 University of Oxford means the Chancellor, Masters and Scholars of the
00006 University of Oxford, having an administrative office at Wellington
00007 Square, Oxford OX1 2JD, UK.
00008 
00009 This file is part of Chaste.
00010 
00011 Chaste is free software: you can redistribute it and/or modify it
00012 under the terms of the GNU Lesser General Public License as published
00013 by the Free Software Foundation, either version 2.1 of the License, or
00014 (at your option) any later version.
00015 
00016 Chaste is distributed in the hope that it will be useful, but WITHOUT
00017 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
00018 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
00019 License for more details. The offer of Chaste under the terms of the
00020 License is subject to the License being interpreted in accordance with
00021 English Law and subject to any action against the University of Oxford
00022 being under the jurisdiction of the English Courts.
00023 
00024 You should have received a copy of the GNU Lesser General Public License
00025 along with Chaste. If not, see <http://www.gnu.org/licenses/>.
00026 
00027 */
00028 
00029 #include "DistributedTetrahedralMesh.hpp"
00030 
00031 #include <cassert>
00032 #include <sstream>
00033 #include <string>
00034 
00035 #include "Exception.hpp"
00036 #include "Element.hpp"
00037 #include "BoundaryElement.hpp"
00038 
00039 #include "PetscTools.hpp"
00040 #include "DistributedVectorFactory.hpp"
00041 #include "OutputFileHandler.hpp"
00042 
00043 #include "RandomNumberGenerator.hpp"
00044 
00045 #include "Timer.hpp"
00046 
00047 #include "petscao.h"
00048 
00049 #include "Warnings.hpp"
00050 
00052 //   IMPLEMENTATION
00054 
00055 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00056 DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::DistributedTetrahedralMesh(DistributedTetrahedralMeshPartitionType::type partitioningMethod)
00057     :
00058       mTotalNumElements(0u),
00059       mTotalNumBoundaryElements(0u),
00060       mTotalNumNodes(0u),
00061       mMetisPartitioning(partitioningMethod)
00062 {
00063     if (ELEMENT_DIM == 1)
00064     {
00065         //No METIS partition is possible - revert to DUMB
00066         mMetisPartitioning = DistributedTetrahedralMeshPartitionType::DUMB;
00067     }
00068 }
00069 
00070 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00071 DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::~DistributedTetrahedralMesh()
00072 {
00073     for (unsigned i=0; i<this->mHaloNodes.size(); i++)
00074     {
00075         delete this->mHaloNodes[i];
00076     }
00077 }
00078 
00079 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00080 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SetDistributedVectorFactory(DistributedVectorFactory* pFactory)
00081 {
00082     AbstractMesh<ELEMENT_DIM,SPACE_DIM>::SetDistributedVectorFactory(pFactory);
00083     mMetisPartitioning = DistributedTetrahedralMeshPartitionType::DUMB;
00084 }
00085 
/**
 * Decide which elements, owned nodes and halo nodes this process is responsible
 * for, according to mMetisPartitioning.
 *
 * @param rMeshReader  reader for the mesh files (left Reset() on exit)
 * @param rNodesOwned  filled with global indices of nodes owned by this process
 * @param rHaloNodesOwned  filled with global indices of halo (read-only neighbour) nodes
 * @param rElementsOwned  filled with global indices of elements this process will store
 * @param rProcessorsOffset  filled (by the library partitioners) with the first
 *        permuted node index owned by each process
 */
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ComputeMeshPartitioning(
    AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
    std::set<unsigned>& rNodesOwned,
    std::set<unsigned>& rHaloNodesOwned,
    std::set<unsigned>& rElementsOwned,
    std::vector<unsigned>& rProcessorsOffset)
{

    if (mMetisPartitioning==DistributedTetrahedralMeshPartitionType::PARMETIS_LIBRARY && PetscTools::IsParallel())
    {
        /*
         *  With ParMetisLibraryNodePartitioning we compute the element partition first
         *  and then we work out the node ownership.
         */
        ParMetisLibraryNodePartitioning(rMeshReader, rElementsOwned, rNodesOwned, rHaloNodesOwned, rProcessorsOffset);
    }
    else
    {
        /*
         *  Otherwise we compute the node partition and then we work out element distribution
         */
        if (mMetisPartitioning==DistributedTetrahedralMeshPartitionType::METIS_LIBRARY && PetscTools::IsParallel())
        {
            MetisLibraryNodePartitioning(rMeshReader, rNodesOwned, rProcessorsOffset);
        }
        else if (mMetisPartitioning==DistributedTetrahedralMeshPartitionType::PETSC_MAT_PARTITION && PetscTools::IsParallel())
        {
            PetscMatrixPartitioning(rMeshReader, rNodesOwned, rProcessorsOffset);
        }
        else
        {
            DumbNodePartitioning(rMeshReader, rNodesOwned);
        }

        // With an NCL (node connectivity list) file we can jump straight to the
        // elements containing our nodes instead of scanning every element.
        if ( rMeshReader.HasNclFile() )
        {
            // Form a set of all the element indices we are going to own
            // (union of the sets from the lines in the NCL file)
            for ( std::set<unsigned>::iterator iter=rNodesOwned.begin();
                  iter!=rNodesOwned.end();
                  ++iter )
            {
                std::vector<unsigned> containing_elements = rMeshReader.GetContainingElementIndices( *iter );
                rElementsOwned.insert( containing_elements.begin(), containing_elements.end() );
            }

            // Iterate through that set rather than mTotalNumElements (knowing that we own a least one node in each line)
            // Then read all the data into a node_index set
            std::set<unsigned> node_index_set;

            for ( std::set<unsigned>::iterator iter=rElementsOwned.begin();
                  iter!=rElementsOwned.end();
                  ++iter )
            {
                ElementData element_data = rMeshReader.GetElementData( *iter );
                node_index_set.insert( element_data.NodeIndices.begin(), element_data.NodeIndices.end() );
            }

            // Subtract off the rNodesOwned set to produce rHaloNodesOwned.
            // Note that rNodesOwned is a subset of node_index_set.
            // std::set_difference can't be used to fill a set...
            // (hand-rolled merge walk over the two sorted sets instead)
            std::set<unsigned>::iterator iter_all = node_index_set.begin();
            std::set<unsigned>::iterator iter_owned = rNodesOwned.begin();
            while (iter_all != node_index_set.end() && iter_owned != rNodesOwned.end())
            {
                if (*iter_all < *iter_owned) // Elements in sets are ordered
                {
                    rHaloNodesOwned.insert(*iter_all++); // This node doesn't appear in rNodesOwned
                }
                else
                {
                    // *iter_all == *iter_owned (subset property), so skip both
                    iter_all++;
                    iter_owned++;
                }
            }
            rHaloNodesOwned.insert(iter_all, node_index_set.end()); // Anything left over is halo
        }
        else
        {
            // No NCL file: sequentially scan every element; any element touching
            // one of our nodes is owned, and its non-owned nodes become halos.
            for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
            {
                ElementData element_data = rMeshReader.GetNextElementData();

                bool element_owned = false;
                std::set<unsigned> temp_halo_nodes;

                for (unsigned i=0; i<ELEMENT_DIM+1; i++)
                {
                    if (rNodesOwned.find(element_data.NodeIndices[i]) != rNodesOwned.end())
                    {
                        element_owned = true;
                        rElementsOwned.insert(element_number);
                    }
                    else
                    {
                        temp_halo_nodes.insert(element_data.NodeIndices[i]);
                    }
                }

                // Only commit the candidate halos if the element turned out to be ours
                if (element_owned)
                {
                    rHaloNodesOwned.insert(temp_halo_nodes.begin(), temp_halo_nodes.end());
                }
            }
        }

        // Timing instrumentation for the PETSc matrix partitioner only
        if (mMetisPartitioning==DistributedTetrahedralMeshPartitionType::PETSC_MAT_PARTITION && PetscTools::IsParallel())
        {
            PetscTools::Barrier();
            if (PetscTools::AmMaster())
            {
                Timer::PrintAndReset("Element and halo node assignation");
            }
        }
    }
    // Leave the reader rewound for the subsequent construction pass
    rMeshReader.Reset();
}
00205 
/**
 * Build the distributed mesh from a mesh reader: partition the mesh, then load
 * only the locally owned (and halo) nodes, owned elements and owned boundary
 * elements on each process, and finally set up the distributed vector factory.
 *
 * @param rMeshReader  reader for the mesh files (left Reset() on exit)
 */
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructFromMeshReader(
    AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader)
{
    std::set<unsigned> nodes_owned;
    std::set<unsigned> halo_nodes_owned;
    std::set<unsigned> elements_owned;
    std::vector<unsigned> proc_offsets;//(PetscTools::GetNumProcs());

    this->mMeshFileBaseName = rMeshReader.GetMeshFileBaseName();
    mTotalNumElements = rMeshReader.GetNumElements();
    mTotalNumBoundaryElements = rMeshReader.GetNumFaces();
    mTotalNumNodes = rMeshReader.GetNumNodes();


    PetscTools::Barrier();
    Timer::Reset();
    ComputeMeshPartitioning(rMeshReader, nodes_owned, halo_nodes_owned, elements_owned, proc_offsets);
    PetscTools::Barrier();
    //Timer::Print("partitioning");

    // Reserve memory
    this->mElements.reserve(elements_owned.size());
    this->mNodes.reserve(nodes_owned.size());

    if ( rMeshReader.IsFileFormatBinary() )
    {
        std::vector<double> coords;
        // Binary : load only the nodes which are needed
        for (std::set<unsigned>::const_iterator it=nodes_owned.begin(); it!=nodes_owned.end(); it++)
        {
            //Loop over wholly-owned nodes
            unsigned global_node_index = *it;
            coords = rMeshReader.GetNode(global_node_index);
            RegisterNode(global_node_index);
            Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, coords, false);

//Node attributes in binary format are not yet supported, see #1730
//            for (unsigned i = 0; i < rMeshReader.GetNodeAttributes().size(); i++)
//            {
//                double attribute = rMeshReader.GetNodeAttributes()[i];
//                p_node->AddNodeAttribute(attribute);
//            }

            this->mNodes.push_back(p_node);
        }
        for (std::set<unsigned>::const_iterator it=halo_nodes_owned.begin(); it!=halo_nodes_owned.end(); it++)
        {
            //Loop over halo-owned nodes
            unsigned global_node_index=*it;
            coords = rMeshReader.GetNode(global_node_index);
            RegisterHaloNode(global_node_index);
            mHaloNodes.push_back(new Node<SPACE_DIM>(global_node_index, coords, false));
        }
    }
    else
    {
        // Ascii : Sequentially load all the nodes and store those owned (or halo-owned) by the process
        for (unsigned node_index=0; node_index < mTotalNumNodes; node_index++)
        {
            std::vector<double> coords;
            coords = rMeshReader.GetNextNode();

            // The node is owned by the processor
            if (nodes_owned.find(node_index) != nodes_owned.end())
            {
                RegisterNode(node_index);
                Node<SPACE_DIM>* p_node =  new Node<SPACE_DIM>(node_index, coords, false);

                for (unsigned i = 0; i < rMeshReader.GetNodeAttributes().size(); i++)
                {
                    double attribute = rMeshReader.GetNodeAttributes()[i];
                    p_node->AddNodeAttribute(attribute);
                }

                this->mNodes.push_back(p_node);
            }

            // The node is a halo node in this processor
            if (halo_nodes_owned.find(node_index) != halo_nodes_owned.end())
            {
                RegisterHaloNode(node_index);
                mHaloNodes.push_back(new Node<SPACE_DIM>(node_index, coords, false));
            }
        }
    }

    if ( rMeshReader.IsFileFormatBinary() )
    {
        // Binary format, we loop only over the elements we have been assigned
        for (std::set<unsigned>::const_iterator elem_it=elements_owned.begin(); elem_it!=elements_owned.end(); elem_it++)
        {
            unsigned global_element_index=*elem_it;
            ElementData element_data;
            element_data = rMeshReader.GetElementData(global_element_index);

            std::vector<Node<SPACE_DIM>*> nodes;
            for (unsigned j=0; j<ELEMENT_DIM+1; j++)
            {
                //because we have populated mNodes and mHaloNodes above, we can now use this method, which should never throw
                nodes.push_back(this->GetNodeOrHaloNode(element_data.NodeIndices[j]));
            }

            RegisterElement(global_element_index);
            Element<ELEMENT_DIM,SPACE_DIM>* p_element = new Element<ELEMENT_DIM,SPACE_DIM>(global_element_index, nodes);
            this->mElements.push_back(p_element);

            if (rMeshReader.GetNumElementAttributes() > 0)
            {
                // Only a single element attribute is supported; it is stored as the region
                assert(rMeshReader.GetNumElementAttributes() == 1);
                unsigned attribute_value = element_data.AttributeValue;
                p_element->SetRegion(attribute_value);
            }
        }
    }
    else
    {
        // Load the elements owned by the processor
        for (unsigned element_index=0; element_index < mTotalNumElements; element_index++)
        {
            ElementData element_data;

            element_data = rMeshReader.GetNextElementData();

            // The element is owned by the processor
            if (elements_owned.find(element_index) != elements_owned.end())
            {
                std::vector<Node<SPACE_DIM>*> nodes;
                for (unsigned j=0; j<ELEMENT_DIM+1; j++)
                {
                    //because we have populated mNodes and mHaloNodes above, we can now use this method, which should never throw
                    nodes.push_back(this->GetNodeOrHaloNode(element_data.NodeIndices[j]));
                }

                RegisterElement(element_index);

                Element<ELEMENT_DIM,SPACE_DIM>* p_element = new Element<ELEMENT_DIM,SPACE_DIM>(element_index, nodes);
                this->mElements.push_back(p_element);

                if (rMeshReader.GetNumElementAttributes() > 0)
                {
                    // Only a single element attribute is supported; it is stored as the region
                    assert(rMeshReader.GetNumElementAttributes() == 1);
                    unsigned attribute_value = element_data.AttributeValue;
                    p_element->SetRegion(attribute_value);
                }
            }
        }
    }

    // Boundary nodes and elements
    // The try/catch pair here implements collective error handling: if any
    // process hits a bad face it tells the others via ReplicateException before
    // re-throwing, so that all processes throw instead of deadlocking.
    try
    {
        for (unsigned face_index=0; face_index<mTotalNumBoundaryElements; face_index++)
        {
            ElementData face_data = rMeshReader.GetNextFaceData();
            std::vector<unsigned> node_indices = face_data.NodeIndices;

            bool own = false;

            for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
            {
                // if I own this node
                if (mNodesMapping.find(node_indices[node_index]) != mNodesMapping.end())
                {
                    own = true;
                    break;
                }
            }

            if (!own)
            {
                continue;
            }

            // Determine if this is a boundary face
            std::set<unsigned> containing_element_indices; // Elements that contain this face
            std::vector<Node<SPACE_DIM>*> nodes;

            for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
            {
                //because we have populated mNodes and mHaloNodes above, we can now use this method,
                //which SHOULD never throw (but it does).
                try
                {
                    nodes.push_back(this->GetNodeOrHaloNode(node_indices[node_index]));
                }
                catch (Exception &e)
                {
                    EXCEPTION("Face does not appear in element file (Face " << face_index << " in "<<this->mMeshFileBaseName<< ")");
                }
            }

            // This is a boundary face
            // Ensure all its nodes are marked as boundary nodes
            for (unsigned j=0; j<nodes.size(); j++)
            {
                if (!nodes[j]->IsBoundaryNode())
                {
                    nodes[j]->SetAsBoundaryNode();
                    this->mBoundaryNodes.push_back(nodes[j]);
                }
                // Register the index that this boundary element will have with the node
                nodes[j]->AddBoundaryElement(face_index);
            }

            RegisterBoundaryElement(face_index);
            BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>* p_boundary_element = new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(face_index, nodes);
            this->mBoundaryElements.push_back(p_boundary_element);

            if (rMeshReader.GetNumFaceAttributes() > 0)
            {
                assert(rMeshReader.GetNumFaceAttributes() == 1);
                unsigned attribute_value = face_data.AttributeValue;
                p_boundary_element->SetRegion(attribute_value);
            }
        }
    }
    catch (Exception &e)
    {
        PetscTools::ReplicateException(true); //Bad face exception
        throw e;
    }
    // Collective no-error signal matching the ReplicateException(true) above
    PetscTools::ReplicateException(false);

    if (mMetisPartitioning != DistributedTetrahedralMeshPartitionType::DUMB && PetscTools::IsParallel())
    {
        assert(this->mNodesPermutation.size() != 0);
        // We reorder so that each process owns a contiguous set of the indices and we can then build a distributed vector factory.
        ReorderNodes();

        unsigned num_owned;
        unsigned rank = PetscTools::GetMyRank();
        if ( !PetscTools::AmTopMost() )
        {
            num_owned =  proc_offsets[rank+1]-proc_offsets[rank];
        }
        else
        {
            // Top-most process takes everything from its offset to the end
            num_owned = mTotalNumNodes - proc_offsets[rank];
        }

        assert(!this->mpDistributedVectorFactory);
        this->mpDistributedVectorFactory = new DistributedVectorFactory(this->GetNumNodes(), num_owned);
    }
    else
    {
        // Dumb or sequential partition: the factory was already created by DumbNodePartitioning
        assert(this->mpDistributedVectorFactory);
    }
    rMeshReader.Reset();
}
00458 
00459 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00460 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalNodes() const
00461 {
00462     return this->mNodes.size();
00463 }
00464 
00465 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00466 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumHaloNodes() const
00467 {
00468     return this->mHaloNodes.size();
00469 }
00470 
00471 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00472 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalElements() const
00473 {
00474     return this->mElements.size();
00475 }
00476 
00477 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00478 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalBoundaryElements() const
00479 {
00480     return this->mBoundaryElements.size();
00481 }
00482 
00483 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00484 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumNodes() const
00485 {
00486     return mTotalNumNodes;
00487 }
00488 
00489 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00490 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumAllNodes() const
00491 {
00492     return mTotalNumNodes;
00493 }
00494 
00495 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00496 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumElements() const
00497 {
00498     return mTotalNumElements;
00499 }
00500 
00501 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00502 DistributedTetrahedralMeshPartitionType::type DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetPartitionType() const
00503 {
00504     return mMetisPartitioning;
00505 }
00506 
00507 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00508 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumBoundaryElements() const
00509 {
00510     return mTotalNumBoundaryElements;
00511 }
00512 
00513 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00514 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetHaloNodeIndices(std::vector<unsigned>& rHaloIndices) const
00515 {
00516     //Make sure the output vector is empty
00517     rHaloIndices.clear();
00518     for (unsigned i=0; i<mHaloNodes.size(); i++)
00519     {
00520         rHaloIndices.push_back(mHaloNodes[i]->GetIndex());
00521     }
00522 }
00523 
00524 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00525 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SetElementOwnerships()
00526 {
00527     // All the local elements are owned by the processor (obviously...)
00528     //Does nothing - unlike the non-distributed version
00529 }
00530 
00531 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00532 bool DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfElement( unsigned elementIndex )
00533 {
00534     try
00535     {
00536         return(AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfElement(elementIndex));
00537     }
00538     catch(Exception& e)      // we don't own the element
00539     {
00540         return false;
00541     }
00542 }
00543 
00544 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00545 bool DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfBoundaryElement( unsigned faceIndex )
00546 {
00547     try
00548     {
00549         return(AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfBoundaryElement(faceIndex));
00550     }
00551     catch(Exception& e)      //  we don't own the face
00552     {
00553         return false;
00554     }
00555 }
00556 
00557 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00558 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterNode(unsigned index)
00559 {
00560     mNodesMapping[index] = this->mNodes.size();
00561 }
00562 
00563 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00564 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterHaloNode(unsigned index)
00565 {
00566     mHaloNodesMapping[index] = mHaloNodes.size();
00567 }
00568 
00569 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00570 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterElement(unsigned index)
00571 {
00572     mElementsMapping[index] = this->mElements.size();
00573 }
00574 
00575 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00576 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterBoundaryElement(unsigned index)
00577 {
00578     mBoundaryElementsMapping[index] = this->mBoundaryElements.size();
00579 }
00580 
00581 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00582 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveNodeMapping(unsigned index) const
00583 {
00584     std::map<unsigned, unsigned>::const_iterator node_position = mNodesMapping.find(index);
00585 
00586     if (node_position == mNodesMapping.end())
00587     {
00588         EXCEPTION("Requested node " << index << " does not belong to processor " << PetscTools::GetMyRank());
00589     }
00590     return node_position->second;
00591 }
00592 
00593 //template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00594 //unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveHaloNodeMapping(unsigned index)
00595 //{
00596 //    assert(mHaloNodesMapping.find(index) != mHaloNodesMapping.end());
00597 //    return mHaloNodesMapping[index];
00598 //}
00599 
00600 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00601 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveElementMapping(unsigned index) const
00602 {
00603     std::map<unsigned, unsigned>::const_iterator element_position = mElementsMapping.find(index);
00604 
00605     if (element_position == mElementsMapping.end())
00606     {
00607         EXCEPTION("Requested element " << index << " does not belong to processor " << PetscTools::GetMyRank());
00608     }
00609 
00610     return element_position->second;
00611 }
00612 
00613 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00614 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveBoundaryElementMapping(unsigned index) const
00615 {
00616     std::map<unsigned, unsigned>::const_iterator boundary_element_position = mBoundaryElementsMapping.find(index);
00617 
00618     if (boundary_element_position == mBoundaryElementsMapping.end())
00619     {
00620         EXCEPTION("Requested boundary element " << index << " does not belong to processor " << PetscTools::GetMyRank());
00621     }
00622 
00623     return boundary_element_position->second;
00624 }
00625 
00626 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00627 Node<SPACE_DIM> * DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNodeOrHaloNode(unsigned index) const
00628 {
00629     std::map<unsigned, unsigned>::const_iterator node_position;
00630     //First search the halo
00631     if ((node_position=mHaloNodesMapping.find(index)) != mHaloNodesMapping.end())
00632     {
00633         return mHaloNodes[node_position->second];
00634     }
00635     //First search the owned node
00636     if ((node_position=mNodesMapping.find(index)) != mNodesMapping.end())
00637     {
00638         //Found an owned node
00639         return this->mNodes[node_position->second];
00640     }
00641     //Not here
00642     EXCEPTION("Requested node/halo " << index << " does not belong to processor " << PetscTools::GetMyRank());
00643 }
00644 
/**
 * Trivial partition: each process owns the contiguous index range handed out
 * by the distributed vector factory (an existing factory if one was supplied,
 * otherwise a fresh PETSC_DECIDE-style split of mTotalNumNodes).
 *
 * @param rMeshReader  unused here beyond the sizes already cached by the caller
 * @param rNodesOwned  filled with this process's contiguous range [low, high)
 */
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::DumbNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
                                                                              std::set<unsigned>& rNodesOwned)
{
    if (this->mpDistributedVectorFactory)
    {
        // A distribution is given by the factory
        if (this->mpDistributedVectorFactory->GetProblemSize() != mTotalNumNodes)
        {
            // Reset stuff
            // NOTE(review): the factory pointer is nulled without delete here -
            // presumably it is owned by the caller that supplied it via
            // SetDistributedVectorFactory; confirm there is no leak.
            this->mpDistributedVectorFactory = NULL;
            this->mTotalNumNodes = 0u;
            this->mTotalNumElements = 0u;
            this->mTotalNumBoundaryElements = 0u;
            EXCEPTION("The distributed vector factory size in the mesh doesn't match the total number of nodes.");
        }
    }
    else
    {
        // No factory supplied: create one, letting it choose the split
        this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes);
    }
    // Own exactly the contiguous block of node indices the factory assigns us
    for (unsigned node_index = this->mpDistributedVectorFactory->GetLow();
         node_index < this->mpDistributedVectorFactory->GetHigh();
         node_index++)
    {
         rNodesOwned.insert(node_index);
    }
}
00673 
00674 
00675 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00676 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::PetscMatrixPartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
00677                                                                                  std::set<unsigned>& rNodesOwned,
00678                                                                                  std::vector<unsigned>& rProcessorsOffset)
00679 {
00680     assert(PetscTools::IsParallel());
00681     assert(ELEMENT_DIM==2 || ELEMENT_DIM==3); // Metis works with triangles and tetras
00682 
00683     unsigned num_nodes = rMeshReader.GetNumNodes();
00684     PetscInt num_procs = PetscTools::GetNumProcs();
00685     unsigned local_proc_index = PetscTools::GetMyRank();
00686 
00687     unsigned num_elements = rMeshReader.GetNumElements();
00688     unsigned num_local_elements = num_elements / num_procs;
00689     unsigned first_local_element = num_local_elements * local_proc_index;
00690     if (PetscTools::AmTopMost())
00691     {
00692         // Take the excess elements
00693         num_local_elements += num_elements - (num_local_elements * num_procs);
00694     }
00695 
00696     PetscTools::Barrier();
00697     Timer::Reset();
00698 
00699     /*
00700      * Create PETSc matrix which will have 1 for adjacent nodes.
00701      */
00702     Mat connectivity_matrix;
00704     PetscTools::SetupMat(connectivity_matrix, num_nodes, num_nodes, 54, PETSC_DECIDE, PETSC_DECIDE, false);
00705 
00706     if ( ! rMeshReader.IsFileFormatBinary() )
00707     {
00708         // Advance the file pointer to the first element I own
00709         for (unsigned element_index = 0; element_index < first_local_element; element_index++)
00710         {
00711             ElementData element_data = rMeshReader.GetNextElementData();
00712         }
00713     }
00714 
00715     // In the loop below we assume that there exist edges between any pair of nodes in an element. This is
00716     // a reasonable assumption for triangles and tetrahedra. This won't be the case for squares or hexahedra
00717     // (or higher order elements). We allow ELEMENT_DIM smaller than SPACE_DIM in case this is a 2D mesh in
00718     // a 3D space.
00719     assert(SPACE_DIM >= ELEMENT_DIM);
00720 
00721     for (unsigned element_index = 0; element_index < num_local_elements; element_index++)
00722     {
00723         ElementData element_data;
00724 
00725         if ( rMeshReader.IsFileFormatBinary() )
00726         {
00727             element_data = rMeshReader.GetElementData(first_local_element + element_index);
00728         }
00729         else
00730         {
00731             element_data = rMeshReader.GetNextElementData();
00732         }
00733 
00734         for (unsigned i=0; i<ELEMENT_DIM+1; i++)
00735         {
00736             for (unsigned j=0; j<i; j++)
00737             {
00738                 unsigned row = element_data.NodeIndices[i];
00739                 unsigned col = element_data.NodeIndices[j];
00740                 MatSetValue(connectivity_matrix, row, col, 1.0, INSERT_VALUES);
00741                 MatSetValue(connectivity_matrix, col, row, 1.0, INSERT_VALUES);
00742             }
00743         }
00744     }
00746     MatAssemblyBegin(connectivity_matrix, MAT_FINAL_ASSEMBLY);
00747     MatAssemblyEnd(connectivity_matrix, MAT_FINAL_ASSEMBLY);
00748 
00749     PetscTools::Barrier();
00750     if (PetscTools::AmMaster())
00751     {
00752         Timer::PrintAndReset("Connectivity matrix assembly");
00753     }
00754 
00755     rMeshReader.Reset();
00756 
00757     PetscInt connectivity_matrix_lo;
00758     PetscInt connectivity_matrix_hi;
00759     MatGetOwnershipRange(connectivity_matrix, &connectivity_matrix_lo, &connectivity_matrix_hi);
00760 
00761     unsigned num_local_nodes = connectivity_matrix_hi - connectivity_matrix_lo;
00762 
00763     MatInfo matrix_info;
00764     MatGetInfo(connectivity_matrix, MAT_LOCAL, &matrix_info);
00765     unsigned local_num_nz = (unsigned) matrix_info.nz_used;
00766 
00767     size_t size = (num_local_nodes+1)*sizeof(PetscInt);
00768     void* ptr;
00769     PetscMalloc(size, &ptr);
00770     PetscInt* local_ia = (PetscInt*) ptr;
00771     size = local_num_nz*sizeof(PetscInt);
00772     PetscMalloc(size, &ptr);
00773     PetscInt* local_ja = (PetscInt*) ptr;
00774 
00775     PetscInt row_num_nz;
00776     const PetscInt* column_indices;
00777 
00778     local_ia[0]=0;
00779     for (PetscInt row_global_index=connectivity_matrix_lo; row_global_index<connectivity_matrix_hi; row_global_index++)
00780     {
00781         MatGetRow(connectivity_matrix, row_global_index, &row_num_nz, &column_indices, PETSC_NULL);
00782 
00783         unsigned row_local_index = row_global_index - connectivity_matrix_lo;
00784         local_ia[row_local_index+1] = local_ia[row_local_index] + row_num_nz;
00785         for (PetscInt col_index=0; col_index<row_num_nz; col_index++)
00786         {
00787            local_ja[local_ia[row_local_index] + col_index] =  column_indices[col_index];
00788         }
00789 
00790         MatRestoreRow(connectivity_matrix, row_global_index, &row_num_nz,&column_indices, PETSC_NULL);
00791     }
00792 
00793     MatDestroy(connectivity_matrix);
00794 
00795     // Convert to an adjacency matrix
00796     Mat adj_matrix;
00797     MatCreateMPIAdj(PETSC_COMM_WORLD, num_local_nodes, num_nodes, local_ia, local_ja, PETSC_NULL, &adj_matrix);
00798 
00799     PetscTools::Barrier();
00800     if (PetscTools::AmMaster())
00801     {
00802         Timer::PrintAndReset("Adjacency matrix creation");
00803     }
00804 
00805     // Get PETSc to call ParMETIS
00806     MatPartitioning part;
00807     MatPartitioningCreate(PETSC_COMM_WORLD, &part);
00808     MatPartitioningSetAdjacency(part, adj_matrix);
00809     MatPartitioningSetFromOptions(part);
00810     IS new_process_numbers;
00811     MatPartitioningApply(part, &new_process_numbers);
00812     MatPartitioningDestroy(part);
00813 
00815     MatDestroy(adj_matrix);
00816 
00817     PetscTools::Barrier();
00818     if (PetscTools::AmMaster())
00819     {
00820         Timer::PrintAndReset("PETSc-ParMETIS call");
00821     }
00822 
00823     // Figure out who owns what - processor offsets
00824     PetscInt* num_nodes_per_process = new PetscInt[num_procs];
00825 #if (PETSC_VERSION_MAJOR == 3) //PETSc 3.x.x
00826     ISPartitioningCount(new_process_numbers, num_procs, num_nodes_per_process);
00827 #else
00828     ISPartitioningCount(new_process_numbers, num_nodes_per_process);
00829 #endif
00830 
00831     rProcessorsOffset.resize(num_procs);
00832     rProcessorsOffset[0] = 0;
00833     for (PetscInt i=1; i<num_procs; i++)
00834     {
00835         rProcessorsOffset[i] = rProcessorsOffset[i-1] + num_nodes_per_process[i-1];
00836     }
00837     unsigned my_num_nodes = num_nodes_per_process[local_proc_index];
00838     delete[] num_nodes_per_process;
00839 
00840     // Figure out who owns what - new node numbering
00841     IS new_global_node_indices;
00842     ISPartitioningToNumbering(new_process_numbers, &new_global_node_indices);
00843 
00844     // Index sets only give local information, we want global
00845     AO ordering;
00846     AOCreateBasicIS(new_global_node_indices, PETSC_NULL /* natural ordering */, &ordering);
00847 
00848     // The locally owned range under the new numbering
00849     PetscInt* local_range = new PetscInt[my_num_nodes];
00850     for (unsigned i=0; i<my_num_nodes; i++)
00851     {
00852         local_range[i] = rProcessorsOffset[local_proc_index] + i;
00853     }
00854     AOApplicationToPetsc(ordering, my_num_nodes, local_range);
00855     //AOView(ordering, PETSC_VIEWER_STDOUT_WORLD);
00856 
00857     // Fill in rNodesOwned
00859     for (unsigned i=0; i<my_num_nodes; i++)
00860     {
00861         rNodesOwned.insert(local_range[i]);
00862     }
00863     delete[] local_range;
00864 
00865     // Once we know the offsets we can compute the permutation vector
00866     PetscInt* global_range = new PetscInt[num_nodes];
00867     for (unsigned i=0; i<num_nodes; i++)
00868     {
00869         global_range[i] = i;
00870     }
00871     AOPetscToApplication(ordering, num_nodes, global_range);
00872 
00873     this->mNodesPermutation.resize(num_nodes);
00874 
00875     for (unsigned node_index=0; node_index<num_nodes; node_index++)
00876     {
00877         this->mNodesPermutation[node_index] = global_range[node_index];
00878     }
00879     delete[] global_range;
00880 
00881     AODestroy(ordering);
00882     ISDestroy(new_process_numbers);
00883     ISDestroy(new_global_node_indices);
00884 
00885     PetscTools::Barrier();
00886     if (PetscTools::AmMaster())
00887     {
00888         Timer::PrintAndReset("PETSc-ParMETIS output manipulation");
00889     }
00890 }
00891 
/**
 * Partition the mesh's nodes among processes by calling the METIS library
 * directly (METIS_PartMeshNodal).  Only the master process reads the element
 * data and runs METIS; the resulting node-to-process assignment is then
 * broadcast to every process with MPI_Bcast.
 *
 * @param rMeshReader        reader supplying element connectivity (read on the master only,
 *                           and Reset() afterwards so it can be re-read later)
 * @param rNodesOwned        output: global indices of the nodes METIS assigned to this process
 * @param rProcessorsOffset  output: for each process, the index of its first node under the
 *                           new numbering; must be empty on entry
 *
 * Side effect: fills this->mNodesPermutation with the old-to-new global node
 * index permutation implied by the partition.
 */
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::MetisLibraryNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
                                                                                  std::set<unsigned>& rNodesOwned,
                                                                                  std::vector<unsigned>& rProcessorsOffset)
{
    assert(PetscTools::IsParallel());

    assert(ELEMENT_DIM==2 || ELEMENT_DIM==3); // Metis works with triangles and tetras

    int nn = rMeshReader.GetNumNodes();
    idxtype* npart = new idxtype[nn]; // node-to-process map; filled by METIS on the master, broadcast below
    assert(npart != NULL);

    //Only the master process will access the element data and perform the partitioning
    if (PetscTools::AmMaster())
    {
        int ne = rMeshReader.GetNumElements();
        idxtype* elmnts = new idxtype[ne * (ELEMENT_DIM+1)]; // flat element->node connectivity in METIS input layout
        assert(elmnts != NULL);

        unsigned counter=0;
        for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
        {
            ElementData element_data = rMeshReader.GetNextElementData();

            for (unsigned i=0; i<ELEMENT_DIM+1; i++)
            {
                elmnts[counter++] = element_data.NodeIndices[i];
            }
        }
        rMeshReader.Reset(); // rewind the reader so callers can re-read element data afterwards

        int etype;

        switch (ELEMENT_DIM)
        {
            case 2:
                etype = 1; //1 is Metis speak for triangles
                break;
            case 3:
                etype = 2; //2 is Metis speak for tetrahedra
                break;
            default:
                NEVER_REACHED;
        }

        int numflag = 0; //0 means C-style numbering is assumed
        int nparts = PetscTools::GetNumProcs();
        int edgecut; // reported by METIS; not used here
        idxtype* epart = new idxtype[ne]; // element partition - METIS requires the buffer but we discard it
        assert(epart != NULL);

        Timer::Reset();
        METIS_PartMeshNodal(&ne, &nn, elmnts, &etype, &numflag, &nparts, &edgecut, epart, npart);//, wgetflag, vwgt);
        //Timer::Print("METIS call");

        delete[] elmnts;
        delete[] epart;
    }

    //Here's the new bottle-neck: share all the node ownership data
    //idxtype is normally int (see metis-4.0/Lib/struct.h 17-22), so MPI_INT matches below
    assert(sizeof(idxtype) == sizeof(int));
    MPI_Bcast(npart /*data*/, nn /*size*/, MPI_INT, 0 /*From Master*/, PETSC_COMM_WORLD);

    assert(rProcessorsOffset.size() == 0); // Making sure the vector is empty. After calling resize() only newly created memory will be initialised to 0.
    rProcessorsOffset.resize(PetscTools::GetNumProcs(), 0);

    for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
    {
        unsigned part_read = npart[node_index];

        // METIS output says I own this node
        if (part_read == PetscTools::GetMyRank())
        {
            rNodesOwned.insert(node_index);
        }

        // Offset is defined as the first node owned by a processor. We compute it incrementally.
        // i.e. if node_index belongs to proc 3 (of 6) we have to shift the processors 4, 5, and 6
        // offset a position.
        for (unsigned proc=part_read+1; proc<PetscTools::GetNumProcs(); proc++)
        {
            rProcessorsOffset[proc]++;
        }
    }

    /*
     * Once we know the offsets we can compute the permutation vector:
     * each node is renumbered to (its owner's offset) + (its rank among
     * that owner's nodes, counted in original index order).
     */
    std::vector<unsigned> local_index(PetscTools::GetNumProcs(), 0);

    this->mNodesPermutation.resize(this->GetNumNodes());

    for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
    {
        unsigned part_read = npart[node_index];

        this->mNodesPermutation[node_index] = rProcessorsOffset[part_read] + local_index[part_read];

        local_index[part_read]++;
    }

    delete[] npart;
}
00997 
00998 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00999 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ReorderNodes()
01000 {
01001     assert(PetscTools::IsParallel());
01002 
01003     // Need to rebuild global-local maps
01004     mNodesMapping.clear();
01005     mHaloNodesMapping.clear();
01006 
01007     // Update indices
01008     for (unsigned index=0; index<this->mNodes.size(); index++)
01009     {
01010         unsigned old_index = this->mNodes[index]->GetIndex();
01011         unsigned new_index = this->mNodesPermutation[old_index];
01012 
01013         this->mNodes[index]->SetIndex(new_index);
01014         mNodesMapping[new_index] = index;
01015     }
01016 
01017     for (unsigned index=0; index<mHaloNodes.size(); index++)
01018     {
01019         unsigned old_index = mHaloNodes[index]->GetIndex();
01020         unsigned new_index = this->mNodesPermutation[old_index];
01021 
01022         mHaloNodes[index]->SetIndex(new_index);
01023         mHaloNodesMapping[new_index] = index;
01024     }
01025 }
01026 
/**
 * Construct a distributed 1D mesh of 'width' elements, i.e. width+1 nodes at
 * integer coordinates 0..width.  A dumb slab partition is used so that
 * archiving doesn't permute anything; each interior process boundary gets one
 * halo node on either side.
 *
 * @param width  number of elements; an exception is thrown if zero
 */
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructLinearMesh(unsigned width)
{
    assert(ELEMENT_DIM == 1);

     //Check that there are enough nodes to make the parallelisation worthwhile
    if (width==0)
    {
        EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
    }
    //Use dumb partition so that archiving doesn't permute anything
    mMetisPartitioning=DistributedTetrahedralMeshPartitionType::DUMB;
    mTotalNumNodes=width+1;
    mTotalNumBoundaryElements=2u; // just the two end points of the line
    mTotalNumElements=width;

    //Use DistributedVectorFactory to make a dumb partition of the nodes
    assert(!this->mpDistributedVectorFactory);
    this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes);
    if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
    {
        //It's a short mesh and this process owns no nodes
        return;
    }

    /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
     * higher numbered process may have dropped out of this construction altogether
     * (because it has no local ownership)
     */
    bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);

    unsigned lo_node=this->mpDistributedVectorFactory->GetLow();
    unsigned hi_node=this->mpDistributedVectorFactory->GetHigh();
    if (!PetscTools::AmMaster())
    {
        //Allow for a halo node below the owned range
        lo_node--;
    }
    if (!am_top_most)
    {
        //Allow for a halo node above the owned range
        hi_node++;
    }
    // p_old_node tracks the previously created node so each element can join
    // consecutive nodes; NULL only before the first iteration (when no element
    // is created anyway, since node_index==lo_node).
    Node<SPACE_DIM>* p_old_node=NULL;
    for (unsigned node_index=lo_node; node_index<hi_node; node_index++)
    {
        // create node or halo-node; a node is a boundary node iff it is an end of the line
        Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(node_index, node_index==0 || node_index==width, node_index);
        if (node_index<this->mpDistributedVectorFactory->GetLow() ||
            node_index==this->mpDistributedVectorFactory->GetHigh() )
        {
            //Beyond left or right it's a halo node
            RegisterHaloNode(node_index);
            mHaloNodes.push_back(p_node);
        }
        else
        {
            RegisterNode(node_index);
            this->mNodes.push_back(p_node); // create node

            //A boundary face has to be wholly owned by the process
            //Since, when ELEMENT_DIM>1, we have *at least* one boundary node as a non-halo
            if (node_index==0) // create left boundary node and boundary element
            {
                this->mBoundaryNodes.push_back(p_node);
                RegisterBoundaryElement(0);
                this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(0, p_node) );
            }
            if (node_index==width) // create right boundary node and boundary element
            {
                this->mBoundaryNodes.push_back(p_node);
                RegisterBoundaryElement(1);
                this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(1, p_node) );
            }
            }
        if (node_index>lo_node) // create element joining the previous node to this one
        {
            std::vector<Node<SPACE_DIM>*> nodes;
            nodes.push_back(p_old_node);
            nodes.push_back(p_node);
            RegisterElement(node_index-1);
            this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(node_index-1, nodes) );
        }
        //Keep track of the node which we've just created
        p_old_node=p_node;
    }
}
01114 
/**
 * Construct a distributed 2D rectangular mesh of width x height unit squares,
 * each split into two triangles.  A dumb partition of complete horizontal
 * slices is used (so archiving doesn't permute anything); each interior
 * process boundary keeps one halo row of nodes.
 *
 * @param width    number of squares across (width+1 nodes per row)
 * @param height   number of squares up; an exception is thrown if zero
 * @param stagger  if true, alternate the diagonal direction of each square
 *                 according to a checkerboard parity; if false, all diagonals
 *                 run the same way
 */
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructRectangularMesh(unsigned width, unsigned height, bool stagger)
{
    assert(SPACE_DIM == 2);
    assert(ELEMENT_DIM == 2);
    //Check that there are enough nodes to make the parallelisation worthwhile
    if (height==0)
    {
        EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
    }
    //Use dumb partition so that archiving doesn't permute anything
    mMetisPartitioning=DistributedTetrahedralMeshPartitionType::DUMB;

    mTotalNumNodes=(width+1)*(height+1);
    mTotalNumBoundaryElements=(width+height)*2; // one edge element per unit length of perimeter
    mTotalNumElements=width*height*2;           // two triangles per unit square

    //Use DistributedVectorFactory to make a dumb partition of space along y
    DistributedVectorFactory y_partition(height+1);
    unsigned lo_y = y_partition.GetLow();
    unsigned hi_y = y_partition.GetHigh();
    //Dumb partition of nodes has to be such that each process gets complete slices
    assert(!this->mpDistributedVectorFactory);
    this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes, (width+1)*y_partition.GetLocalOwnership());
    if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
    {
        //It's a short mesh and this process owns no nodes
        return;
    }
    /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
     * higher numbered process may have dropped out of this construction altogether
     * (because it has no local ownership)
     */
    bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);


    if (!PetscTools::AmMaster())
    {
        //Allow for a halo row of nodes below the owned slice
        lo_y--;
    }
    if (!am_top_most)
    {
        //Allow for a halo row of nodes above the owned slice
        hi_y++;
    }

    //Construct the nodes, row by row over the owned slice plus halo rows
    for (unsigned j=lo_y; j<hi_y; j++)
    {
        for (unsigned i=0; i<width+1; i++)
        {
            bool is_boundary=false;
            if (i==0 || j==0 || i==width || j==height)
            {
                is_boundary=true;
            }
            unsigned global_node_index=((width+1)*(j) + i); //Verified from sequential
            Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, is_boundary, i, j);
            if (j<y_partition.GetLow() || j==y_partition.GetHigh() )
            {
                //Row below or above the owned slice: it's a halo node
                RegisterHaloNode(global_node_index);
                mHaloNodes.push_back(p_node);
            }
            else
            {
                RegisterNode(global_node_index);
                this->mNodes.push_back(p_node);
            }
            if (is_boundary)
            {
                this->mBoundaryNodes.push_back(p_node);
            }
        }
    }

    //Construct the boundary elements.  Their global indices run anticlockwise:
    //top edge [0,width), right edge, bottom edge, then left edge.
    unsigned belem_index;
    //Top
    if (am_top_most)
    {
       for (unsigned i=0; i<width; i++)
       {
            std::vector<Node<SPACE_DIM>*> nodes;
            nodes.push_back(GetNodeOrHaloNode( height*(width+1)+i+1 ));
            nodes.push_back(GetNodeOrHaloNode( height*(width+1)+i ));
            belem_index=i;
            RegisterBoundaryElement(belem_index);
            this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
        }
    }

    //Right
    for (unsigned j=lo_y+1; j<hi_y; j++)
    {
        std::vector<Node<SPACE_DIM>*> nodes;
        nodes.push_back(GetNodeOrHaloNode( (width+1)*j-1 ));
        nodes.push_back(GetNodeOrHaloNode( (width+1)*(j+1)-1 ));
        belem_index=width+j-1;
        RegisterBoundaryElement(belem_index);
        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
    }

    //Bottom
    if (PetscTools::AmMaster())
    {
        for (unsigned i=0; i<width; i++)
        {
            std::vector<Node<SPACE_DIM>*> nodes;
            nodes.push_back(GetNodeOrHaloNode( i ));
            nodes.push_back(GetNodeOrHaloNode( i+1 ));
            belem_index=width+height+i;
            RegisterBoundaryElement(belem_index);
            this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
        }
    }

    //Left
    for (unsigned j=lo_y; j<hi_y-1; j++)
    {
        std::vector<Node<SPACE_DIM>*> nodes;
        nodes.push_back(GetNodeOrHaloNode( (width+1)*(j+1) ));
        nodes.push_back(GetNodeOrHaloNode( (width+1)*(j) ));
        belem_index=2*width+height+j;
        RegisterBoundaryElement(belem_index);
        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
    }


    //Construct the elements: two triangles per unit square
    unsigned elem_index;
    for (unsigned j=lo_y; j<hi_y-1; j++)
    {
        for (unsigned i=0; i<width; i++)
        {
            unsigned parity=(i+(height-j))%2;//Note that parity is measured from the top-left (not bottom left) for historical reasons
            unsigned nw=(j+1)*(width+1)+i; //ne=nw+1
            unsigned sw=(j)*(width+1)+i;   //se=sw+1
            std::vector<Node<SPACE_DIM>*> upper_nodes;
            upper_nodes.push_back(GetNodeOrHaloNode( nw ));
            upper_nodes.push_back(GetNodeOrHaloNode( nw+1 ));
            // parity chooses which diagonal splits the square when staggering
            if (stagger==false  || parity == 1)
            {
                upper_nodes.push_back(GetNodeOrHaloNode( sw+1 ));
            }
            else
            {
                upper_nodes.push_back(GetNodeOrHaloNode( sw ));
            }
            elem_index=2*(j*width+i);
            RegisterElement(elem_index);
            this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index,upper_nodes));
            std::vector<Node<SPACE_DIM>*> lower_nodes;
            lower_nodes.push_back(GetNodeOrHaloNode( sw+1 ));
            lower_nodes.push_back(GetNodeOrHaloNode( sw ));
            if (stagger==false  ||parity == 1)
            {
                lower_nodes.push_back(GetNodeOrHaloNode( nw ));
            }
            else
            {
                lower_nodes.push_back(GetNodeOrHaloNode( nw+1 ));
            }
            elem_index++;
            RegisterElement(elem_index);
            this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index,lower_nodes));
        }
    }

}
01286 
01287 
01288 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
01289 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructCuboid(unsigned width,
01290         unsigned height,
01291         unsigned depth)
01292 {
01293     assert(SPACE_DIM == 3);
01294     assert(ELEMENT_DIM == 3);
01295     //Check that there are enough nodes to make the parallelisation worthwhile
01296     if (depth==0)
01297     {
01298         EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
01299     }
01300 
01301     //Use dumb partition so that archiving doesn't permute anything
01302     mMetisPartitioning=DistributedTetrahedralMeshPartitionType::DUMB;
01303 
01304     mTotalNumNodes=(width+1)*(height+1)*(depth+1);
01305     mTotalNumBoundaryElements=((width*height)+(width*depth)+(height*depth))*4;//*2 for top-bottom, *2 for tessellating each unit square
01306     mTotalNumElements=width*height*depth*6;
01307 
01308     //Use DistributedVectorFactory to make a dumb partition of space
01309     DistributedVectorFactory z_partition(depth+1);
01310     unsigned lo_z = z_partition.GetLow();
01311     unsigned hi_z = z_partition.GetHigh();
01312 
01313     //Dumb partition of nodes has to be such that each process gets complete slices
01314     assert(!this->mpDistributedVectorFactory);
01315     this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes, (width+1)*(height+1)*z_partition.GetLocalOwnership());
01316     if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
01317     {
01318 #define COVERAGE_IGNORE
01319         // It's a short mesh and this process owns no nodes.  This problem can only occur on 4 or more processes,
01320         // so we can't cover it - coverage only runs with 1 and 2 processes.
01321         WARNING("No nodes were assigned to processor " << PetscTools::GetMyRank() << " in DistributedTetrahedralMesh::ConstructCuboid()");
01322         return;
01323 #undef COVERAGE_IGNORE
01324     }
01325     /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
01326      * higher numbered process may have dropped out of this construction altogether
01327      * (because is has no local ownership)
01328      */
01329     bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);
01330 
01331 
01332 
01333     if (!PetscTools::AmMaster())
01334     {
01335         //Allow for a halo node
01336         lo_z--;
01337     }
01338     if (!am_top_most)
01339     {
01340         //Allow for a halo node
01341         hi_z++;
01342     }
01343 
01344     //Construct the nodes
01345     unsigned global_node_index;
01346     for (unsigned k=lo_z; k<hi_z; k++)
01347     {
01348         for (unsigned j=0; j<height+1; j++)
01349         {
01350             for (unsigned i=0; i<width+1; i++)
01351             {
01352                 bool is_boundary = false;
01353                 if (i==0 || j==0 || k==0 || i==width || j==height || k==depth)
01354                 {
01355                     is_boundary = true;
01356                 }
01357                 global_node_index = (k*(height+1)+j)*(width+1)+i;
01358 
01359                 Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, is_boundary, i, j, k);
01360 
01361                 if (k<z_partition.GetLow() || k==z_partition.GetHigh() )
01362                 {
01363                     //Beyond left or right it's a halo node
01364                     RegisterHaloNode(global_node_index);
01365                     mHaloNodes.push_back(p_node);
01366                 }
01367                 else
01368                 {
01369                     RegisterNode(global_node_index);
01370                     this->mNodes.push_back(p_node);
01371                 }
01372 
01373                 if (is_boundary)
01374                 {
01375                     this->mBoundaryNodes.push_back(p_node);
01376                 }
01377             }
01378         }
01379     }
01380 
01381     // Construct the elements
01382 
01383     unsigned element_nodes[6][4] = {{0, 1, 5, 7}, {0, 1, 3, 7},
01384                                         {0, 2, 3, 7}, {0, 2, 6, 7},
01385                                         {0, 4, 6, 7}, {0, 4, 5, 7}};
01386     std::vector<Node<SPACE_DIM>*> tetrahedra_nodes;
01387 
01388     for (unsigned k=lo_z; k<hi_z-1; k++)
01389     {
01390         unsigned belem_index = 0;
01391         if (k != 0)
01392         {
01393             // height*width squares on upper face, k layers of 2*height+2*width square aroun
01394             belem_index =   2*(height*width+k*2*(height+width));
01395         }
01396 
01397         for (unsigned j=0; j<height; j++)
01398         {
01399             for (unsigned i=0; i<width; i++)
01400             {
01401                 // Compute the nodes' index
01402                 unsigned global_node_indices[8];
01403                 unsigned local_node_index = 0;
01404 
01405                 for (unsigned z = 0; z < 2; z++)
01406                 {
01407                     for (unsigned y = 0; y < 2; y++)
01408                     {
01409                         for (unsigned x = 0; x < 2; x++)
01410                         {
01411                             global_node_indices[local_node_index] = i+x+(width+1)*(j+y+(height+1)*(k+z));
01412 
01413                             local_node_index++;
01414                         }
01415                     }
01416                 }
01417 
01418                 for (unsigned m = 0; m < 6; m++)
01419                 {
01420                     // Tetrahedra #m
01421 
01422                     tetrahedra_nodes.clear();
01423 
01424                     for (unsigned n = 0; n < 4; n++)
01425                     {
01426                         tetrahedra_nodes.push_back(GetNodeOrHaloNode( global_node_indices[element_nodes[m][n]] ));
01427                     }
01428                     unsigned elem_index = 6 * ((k*height+j)*width+i)+m;
01429                     RegisterElement(elem_index);
01430                     this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index, tetrahedra_nodes));
01431                 }
01432 
01433                 //Are we at a boundary?
01434                 std::vector<Node<SPACE_DIM>*> triangle_nodes;
01435 
01436                 if (i == 0) //low face at x==0
01437                 {
01438                     triangle_nodes.clear();
01439                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
01440                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
01441                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
01442                     RegisterBoundaryElement(belem_index);
01443                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01444                     triangle_nodes.clear();
01445                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
01446                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
01447                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
01448                     RegisterBoundaryElement(belem_index);
01449                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01450                 }
01451                 if (i == width-1) //high face at x=width
01452                 {
01453                     triangle_nodes.clear();
01454                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
01455                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
01456                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
01457                     RegisterBoundaryElement(belem_index);
01458                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01459                     triangle_nodes.clear();
01460                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
01461                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
01462                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
01463                     RegisterBoundaryElement(belem_index);
01464                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01465                 }
01466                 if (j == 0) //low face at y==0
01467                 {
01468                     triangle_nodes.clear();
01469                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
01470                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
01471                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
01472                     RegisterBoundaryElement(belem_index);
01473                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01474                     triangle_nodes.clear();
01475                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
01476                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
01477                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
01478                     RegisterBoundaryElement(belem_index);
01479                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01480                 }
01481                 if (j == height-1) //high face at y=height
01482                 {
01483                     triangle_nodes.clear();
01484                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
01485                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
01486                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
01487                     RegisterBoundaryElement(belem_index);
01488                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01489                     triangle_nodes.clear();
01490                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
01491                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
01492                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
01493                     RegisterBoundaryElement(belem_index);
01494                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01495                 }
01496                 if (k == 0) //low face at z==0
01497                 {
01498                     triangle_nodes.clear();
01499                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
01500                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
01501                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
01502                     RegisterBoundaryElement(belem_index);
01503                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01504                     triangle_nodes.clear();
01505                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
01506                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
01507                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
01508                     RegisterBoundaryElement(belem_index);
01509                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01510                 }
01511                 if (k == depth-1) //high face at z=depth
01512                 {
01513                     triangle_nodes.clear();
01514                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
01515                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
01516                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
01517                     RegisterBoundaryElement(belem_index);
01518                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01519                     triangle_nodes.clear();
01520                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
01521                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
01522                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
01523                     RegisterBoundaryElement(belem_index);
01524                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01525                 }
01526             }//i
01527         }//j
01528     }//k
01529 }
01530 
01531 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
01532 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Scale(const double xFactor, const double yFactor, const double zFactor)
01533 {
01534     //Base class scale (scales node positions)
01535     AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Scale(xFactor, yFactor, zFactor);
01536     //Scales halos
01537     for (unsigned i=0; i<mHaloNodes.size(); i++)
01538     {
01539         c_vector<double, SPACE_DIM>& r_location = mHaloNodes[i]->rGetModifiableLocation();
01540         if (SPACE_DIM>=3)
01541         {
01542             r_location[2] *= zFactor;
01543         }
01544         if (SPACE_DIM>=2)
01545         {
01546             r_location[1] *= yFactor;
01547         }
01548         r_location[0] *= xFactor;
01549     }
01550 
01551 }
01552 
01553 
/**
 * Partition the mesh using the ParMETIS library.
 *
 * Works out, for this process, the sets of elements, nodes and halo nodes it
 * will own, plus the per-process node offsets, from which the node permutation
 * is computed.  Must be called on all processes (contains MPI collectives).
 *
 * @param rMeshReader  reader positioned at the start of the mesh files
 * @param rElementsOwned  filled with the global indices of elements owned locally
 * @param rNodesOwned  filled with the global indices of nodes owned locally
 * @param rHaloNodesOwned  filled with the global indices of halo nodes cached locally
 * @param rProcessorsOffset  filled with, for each process, the index of its first owned node
 *                           in the permuted numbering (must be passed in empty)
 */
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ParMetisLibraryNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
                                                                                       std::set<unsigned>& rElementsOwned,
                                                                                       std::set<unsigned>& rNodesOwned,
                                                                                       std::set<unsigned>& rHaloNodesOwned,
                                                                                       std::vector<unsigned>& rProcessorsOffset)
{
    assert(PetscTools::IsParallel());
    assert(ELEMENT_DIM==2 || ELEMENT_DIM==3); // Metis works with triangles and tetras

    const unsigned num_elements = rMeshReader.GetNumElements();
    const unsigned num_procs = PetscTools::GetNumProcs();
    const unsigned local_proc_index = PetscTools::GetMyRank();

    /*
     *  Work out initial element distribution: elements are dealt out in
     *  (nearly) equal contiguous chunks, one per process, with the last
     *  process absorbing the remainder.
     *
     *  NOTE(review): these are variable-length arrays, a GCC extension rather
     *  than standard C++ — acceptable given the project toolchain, but worth
     *  confirming if other compilers are targeted.
     */
    idxtype element_distribution[num_procs+1];
    idxtype element_count[num_procs];

    element_distribution[0]=0;

    for (unsigned proc_index=1; proc_index<num_procs; proc_index++)
    {
        element_distribution[proc_index] = element_distribution[proc_index-1] + num_elements/num_procs;
        element_count[proc_index-1] = element_distribution[proc_index] - element_distribution[proc_index-1];
    }

    // Last process takes everything up to num_elements (including any remainder)
    element_distribution[num_procs] = num_elements;
    element_count[num_procs-1] = element_distribution[num_procs] - element_distribution[num_procs-1];

    /*
     *  Create distributed mesh data structure in the CSR-like format ParMETIS
     *  expects: eptr[e]..eptr[e+1] delimits element e's node indices in eind.
     */
    unsigned first_local_element = element_distribution[local_proc_index];
    unsigned last_plus_one_element = element_distribution[local_proc_index+1];
    unsigned num_local_elements = last_plus_one_element - first_local_element;

    idxtype* eind = new idxtype[num_local_elements*(ELEMENT_DIM+1)];
    idxtype* eptr = new idxtype[num_local_elements+1];

    if ( ! rMeshReader.IsFileFormatBinary() )
    {
        // Advance the file pointer to the first element I own.
        // (ASCII readers only support sequential access; the read data are discarded.)
        for (unsigned element_index = 0; element_index < first_local_element; element_index++)
        {
            ElementData element_data = rMeshReader.GetNextElementData();
        }
    }

    // Read this process's chunk of elements and pack their connectivity into eind/eptr
    unsigned counter=0;
    for (unsigned element_index = 0; element_index < num_local_elements; element_index++)
    {
        ElementData element_data;

        if ( rMeshReader.IsFileFormatBinary() )
        {
            // Binary format supports random access by global element index
            element_data = rMeshReader.GetElementData(first_local_element + element_index);
        }
        else
        {
            element_data = rMeshReader.GetNextElementData();
        }

        eptr[element_index] = counter;
        for (unsigned i=0; i<ELEMENT_DIM+1; i++)
        {
            eind[counter++] = element_data.NodeIndices[i];
        }
    }
    eptr[num_local_elements] = counter;

    rMeshReader.Reset();

    int numflag = 0; // METIS speak for C-style numbering
    int ncommonnodes = 3; // Connectivity degree. Manual recommends 3 for meshes made exclusively of tetrahedra.
    MPI_Comm communicator = PETSC_COMM_WORLD;

    // Output arrays allocated by ParMETIS (hence freed with free(), not delete[])
    idxtype* xadj;
    idxtype* adjncy;

    Timer::Reset();
    // Convert the distributed mesh into its dual graph (elements = vertices,
    // shared-face adjacency = edges).  Collective over all processes.
    ParMETIS_V3_Mesh2Dual(element_distribution, eptr, eind,
                          &numflag, &ncommonnodes, &xadj, &adjncy, &communicator);
    //Timer::Print("ParMETIS Mesh2Dual");

    // Connectivity no longer needed once the dual graph exists
    delete[] eind;
    delete[] eptr;

    int weight_flag = 0; // unweighted graph
    int n_constrains = 0; // number of weights that each vertex has (number of balance constrains)
    int n_subdomains = PetscTools::GetNumProcs();
    int options[3]; // extra options
    options[0] = 0; // ignore extra options
    int edgecut;

    idxtype* local_partition = new idxtype[num_local_elements];

/*
 *  In order to use ParMETIS_V3_PartGeomKway, we need to sort out how to compute the coordinates of the
 *  centers of each element efficiently.
 *
 *  In the meantime use ParMETIS_V3_PartKway.
 */
//    int n_dimensions = ELEMENT_DIM;
//    float node_coordinates[num_local_elements*SPACE_DIM];
//
//    ParMETIS_V3_PartGeomKway(element_distribution, xadj, adjncy, NULL, NULL, &weight_flag, &numflag,
//                             &n_dimensions, node_coordinates, &n_constrains, &n_subdomains, NULL, NULL,
//                             options, &edgecut, local_partition, &communicator);

    Timer::Reset();
    // Partition the dual graph; local_partition[e] is the subdomain assigned to
    // local element e.  Collective over all processes.
    ParMETIS_V3_PartKway(element_distribution, xadj, adjncy, NULL, NULL, &weight_flag, &numflag,
                         &n_constrains, &n_subdomains, NULL, NULL,
                         options, &edgecut, local_partition, &communicator);
    //Timer::Print("ParMETIS PartKway");


    // Gather every process's partial partition into a full, globally-replicated one.
    // NOTE(review): this assumes idxtype is int (true for ParMETIS 3.x) so that
    // MPI_INT and the idxtype count/displacement arrays are compatible — confirm
    // if the ParMETIS version is ever upgraded.
    idxtype* global_element_partition = new idxtype[num_elements];

    MPI_Allgatherv(local_partition, num_local_elements, MPI_INT,
                   global_element_partition, element_count, element_distribution, MPI_INT, PETSC_COMM_WORLD);

    delete[] local_partition;

    // Record the elements ParMETIS assigned to this process
    for (unsigned elem_index=0; elem_index<num_elements; elem_index++)
    {
        if ((unsigned) global_element_partition[elem_index] == local_proc_index)
        {
            rElementsOwned.insert(elem_index);
        }
    }

    rMeshReader.Reset();
    // xadj/adjncy were malloc'd inside ParMETIS
    free(xadj);
    free(adjncy);

    unsigned num_nodes = rMeshReader.GetNumNodes();

    //unsigned global_node_partition[num_nodes]; // initialised to UNASSIGNED (do #define UNASSIGNED -1
    std::vector<unsigned> global_node_partition;
    global_node_partition.resize(num_nodes, UNASSIGNED_NODE);

    assert(rProcessorsOffset.size() == 0); // Making sure the vector is empty. After calling resize() only newly created memory will be initialised to 0.
    rProcessorsOffset.resize(PetscTools::GetNumProcs(), 0);


    /*
     *  Work out node distribution based on initial element distribution returned by ParMETIS
     *
     *  In this loop we handle 4 different data structures:
     *      global_node_partition and rProcessorsOffset are global,
     *      rNodesOwned and rHaloNodesOwned are local.
     */

    // A node is assigned to the owner of the FIRST element (in access order)
    // containing it; shuffling the access order for binary files spreads that
    // first-come advantage evenly over processes.
    std::vector<unsigned> element_access_order;

    if ( rMeshReader.IsFileFormatBinary() )
    {
        // Fixed seed so that every process computes the same shuffled order
        RandomNumberGenerator* p_gen = RandomNumberGenerator::Instance();
        p_gen->Reseed(0);
        p_gen->Shuffle(mTotalNumElements,element_access_order);
    }
    else
    {
        // ASCII readers are sequential-only, so access order is just file order
        for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
        {
            element_access_order.push_back(element_number);
        }
    }


    for (unsigned element_count = 0; element_count < mTotalNumElements; element_count++)
    {
        unsigned element_number = element_access_order[element_count];
        unsigned element_owner = global_element_partition[element_number];

        ElementData element_data;

        if ( rMeshReader.IsFileFormatBinary() )
        {
            element_data = rMeshReader.GetElementData(element_number);
        }
        else
        {
            element_data = rMeshReader.GetNextElementData();
        }

        for (unsigned i=0; i<ELEMENT_DIM+1; i++)
        {
            /*
             * For each node in this element, check whether it hasn't been assigned to another processor yet.
             * If so, assign it to the owner the element. Otherwise, consider it halo.
             */
            if ( global_node_partition[element_data.NodeIndices[i]] == UNASSIGNED_NODE )
            {
                if (element_owner == local_proc_index)
                {
                    rNodesOwned.insert(element_data.NodeIndices[i]);
                }

                global_node_partition[element_data.NodeIndices[i]] = element_owner;

                // Offset is defined as the first node owned by a processor. We compute it incrementally.
                // i.e. if node_index belongs to proc 3 (of 6) we have to shift the processors 4, 5, and 6
                // offset a position.
                for (unsigned proc=element_owner+1; proc<PetscTools::GetNumProcs(); proc++)
                {
                    rProcessorsOffset[proc]++;
                }
            }
            else
            {
                // Node already assigned elsewhere: if this process owns the element
                // but not the node, the node is a halo node for this process.
                if (element_owner == local_proc_index)
                {
                    //if (rNodesOwned.find(element_data.NodeIndices[i]) == rNodesOwned.end())
                    if (global_node_partition[element_data.NodeIndices[i]] != local_proc_index)
                    {
                        rHaloNodesOwned.insert(element_data.NodeIndices[i]);
                    }
                }
            }
        }
    }

    delete[] global_element_partition;

    /*
     * Refine element distribution. Add extra elements that parMETIS didn't consider initially but
     * include any node owned by the processor. This ensures that all the system matrix rows are
     * assembled locally.
     */
    rMeshReader.Reset();

    for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
    {
        ElementData element_data = rMeshReader.GetNextElementData();

        bool element_owned = false;
        std::set<unsigned> temp_halo_nodes;

        for (unsigned i=0; i<ELEMENT_DIM+1; i++)
        {
            if (rNodesOwned.find(element_data.NodeIndices[i]) != rNodesOwned.end())
            {
                element_owned = true;
                rElementsOwned.insert(element_number);
            }
            else
            {
                // Only becomes a halo node if the element turns out to be owned
                temp_halo_nodes.insert(element_data.NodeIndices[i]);
            }
        }

        if (element_owned)
        {
            rHaloNodesOwned.insert(temp_halo_nodes.begin(), temp_halo_nodes.end());
        }
    }

    rMeshReader.Reset();

    /*
     *  Once we know the offsets we can compute the permutation vector:
     *  node node_index maps to its process's offset plus its position among
     *  that process's nodes (in original-index order).
     */
    std::vector<unsigned> local_index(PetscTools::GetNumProcs(), 0);

    this->mNodesPermutation.resize(this->GetNumNodes());

    for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
    {
        unsigned partition = global_node_partition[node_index];
        assert(partition!=UNASSIGNED_NODE);

        this->mNodesPermutation[node_index] = rProcessorsOffset[partition] + local_index[partition];

        local_index[partition]++;
    }

}
01834 
01835 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
01836 ChasteCuboid<SPACE_DIM> DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateBoundingBox() const
01837 {
01838     ChastePoint<SPACE_DIM> my_minimum_point;
01839     ChastePoint<SPACE_DIM> my_maximum_point;
01840 
01841     try
01842     {
01843         ChasteCuboid<SPACE_DIM> my_box=AbstractMesh<ELEMENT_DIM, SPACE_DIM>::CalculateBoundingBox();
01844         my_minimum_point=my_box.rGetLowerCorner();
01845         my_maximum_point=my_box.rGetUpperCorner();
01846     }
01847     catch (Exception& e)
01848     {
01849 #define COVERAGE_IGNORE
01850         PetscTools::ReplicateException(true);
01851         throw e;
01852 #undef COVERAGE_IGNORE
01853     }
01854 
01855     PetscTools::ReplicateException(false);
01856 
01857     c_vector<double, SPACE_DIM> global_minimum_point;
01858     c_vector<double, SPACE_DIM> global_maximum_point;
01859     MPI_Allreduce(&my_minimum_point.rGetLocation()[0], &global_minimum_point[0], SPACE_DIM, MPI_DOUBLE, MPI_MIN, PETSC_COMM_WORLD);
01860     MPI_Allreduce(&my_maximum_point.rGetLocation()[0], &global_maximum_point[0], SPACE_DIM, MPI_DOUBLE, MPI_MAX, PETSC_COMM_WORLD);
01861 
01862     ChastePoint<SPACE_DIM> min(global_minimum_point);
01863     ChastePoint<SPACE_DIM> max(global_maximum_point);
01864 
01865     return ChasteCuboid<SPACE_DIM>(min, max);
01866 }
01867 
01868 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
01869 typename DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::HaloNodeIterator DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetHaloNodeIteratorBegin() const
01870 {
01871     return mHaloNodes.begin();
01872 }
01873 
01874 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
01875 typename DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::HaloNodeIterator DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetHaloNodeIteratorEnd() const
01876 {
01877     return mHaloNodes.end();
01878 }
01879 
01881 // Explicit instantiation
01883 
// Instantiate the template for each supported (element dimension, space dimension)
// pair; element dimension never exceeds space dimension.
template class DistributedTetrahedralMesh<1,1>;
template class DistributedTetrahedralMesh<1,2>;
template class DistributedTetrahedralMesh<1,3>;
template class DistributedTetrahedralMesh<2,2>;
template class DistributedTetrahedralMesh<2,3>;
template class DistributedTetrahedralMesh<3,3>;


// Serialization for Boost >= 1.36
#include "SerializationExportWrapperForCpp.hpp"
EXPORT_TEMPLATE_CLASS_ALL_DIMS(DistributedTetrahedralMesh)
// Generated on Thu Dec 22 13:00:16 2011 for Chaste by doxygen 1.6.3