DistributedTetrahedralMesh.cpp

00001 /*
00002 
00003 Copyright (C) University of Oxford, 2005-2011
00004 
00005 University of Oxford means the Chancellor, Masters and Scholars of the
00006 University of Oxford, having an administrative office at Wellington
00007 Square, Oxford OX1 2JD, UK.
00008 
00009 This file is part of Chaste.
00010 
00011 Chaste is free software: you can redistribute it and/or modify it
00012 under the terms of the GNU Lesser General Public License as published
00013 by the Free Software Foundation, either version 2.1 of the License, or
00014 (at your option) any later version.
00015 
00016 Chaste is distributed in the hope that it will be useful, but WITHOUT
00017 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
00018 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
00019 License for more details. The offer of Chaste under the terms of the
00020 License is subject to the License being interpreted in accordance with
00021 English Law and subject to any action against the University of Oxford
00022 being under the jurisdiction of the English Courts.
00023 
00024 You should have received a copy of the GNU Lesser General Public License
00025 along with Chaste. If not, see <http://www.gnu.org/licenses/>.
00026 
00027 */
00028 
00029 #include "DistributedTetrahedralMesh.hpp"
00030 
00031 #include <cassert>
00032 #include <sstream>
00033 #include <string>
00034 
00035 #include "Exception.hpp"
00036 #include "Element.hpp"
00037 #include "BoundaryElement.hpp"
00038 
00039 #include "PetscTools.hpp"
00040 #include "DistributedVectorFactory.hpp"
00041 #include "OutputFileHandler.hpp"
00042 
00043 #include "RandomNumberGenerator.hpp"
00044 
00045 #include "Timer.hpp"
00046 
00047 #include "petscao.h"
00048 
00049 #include "Warnings.hpp"
00050 
00052 //   IMPLEMENTATION
00054 
00055 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00056 DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::DistributedTetrahedralMesh(DistributedTetrahedralMeshPartitionType::type partitioningMethod)
00057     :
00058       mTotalNumElements(0u),
00059       mTotalNumBoundaryElements(0u),
00060       mTotalNumNodes(0u),
00061       mMetisPartitioning(partitioningMethod)
00062 {
00063     if (ELEMENT_DIM == 1)
00064     {
00065         //No METIS partition is possible - revert to DUMB
00066         mMetisPartitioning = DistributedTetrahedralMeshPartitionType::DUMB;
00067     }
00068 }
00069 
00070 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00071 DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::~DistributedTetrahedralMesh()
00072 {
00073     for (unsigned i=0; i<this->mHaloNodes.size(); i++)
00074     {
00075         delete this->mHaloNodes[i];
00076     }
00077 }
00078 
00079 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00080 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SetDistributedVectorFactory(DistributedVectorFactory* pFactory)
00081 {
00082     AbstractMesh<ELEMENT_DIM,SPACE_DIM>::SetDistributedVectorFactory(pFactory);
00083     mMetisPartitioning = DistributedTetrahedralMeshPartitionType::DUMB;
00084 }
00085 
/**
 * Work out which nodes, halo nodes and elements this process is responsible for,
 * according to the partitioning method held in mMetisPartitioning.
 *
 * @param rMeshReader  the mesh reader (Reset() is called before returning)
 * @param rNodesOwned  filled with the global indices of nodes owned by this process
 * @param rHaloNodesOwned  filled with the indices of halo nodes (needed locally but not owned)
 * @param rElementsOwned  filled with the indices of elements containing at least one owned node
 * @param rProcessorsOffset  filled (by the library partitioners) with each process's first node index
 */
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ComputeMeshPartitioning(
    AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
    std::set<unsigned>& rNodesOwned,
    std::set<unsigned>& rHaloNodesOwned,
    std::set<unsigned>& rElementsOwned,
    std::vector<unsigned>& rProcessorsOffset)
{

    if (mMetisPartitioning==DistributedTetrahedralMeshPartitionType::PARMETIS_LIBRARY && PetscTools::IsParallel())
    {
        /*
         *  With ParMetisLibraryNodePartitioning we compute the element partition first
         *  and then we work out the node ownership.
         */
        ParMetisLibraryNodePartitioning(rMeshReader, rElementsOwned, rNodesOwned, rHaloNodesOwned, rProcessorsOffset);
    }
    else
    {
        /*
         *  Otherwise we compute the node partition and then we work out the element distribution
         */
        if (mMetisPartitioning==DistributedTetrahedralMeshPartitionType::METIS_LIBRARY && PetscTools::IsParallel())
        {
            MetisLibraryNodePartitioning(rMeshReader, rNodesOwned, rProcessorsOffset);
        }
        else if (mMetisPartitioning==DistributedTetrahedralMeshPartitionType::PETSC_MAT_PARTITION && PetscTools::IsParallel())
        {
            PetscMatrixPartitioning(rMeshReader, rNodesOwned, rProcessorsOffset);
        }
        else
        {
            // Sequential run, or DUMB partitioning requested
            DumbNodePartitioning(rMeshReader, rNodesOwned);
        }

        if ( rMeshReader.HasNclFile() )
        {
            // The NCL (node connectivity list) file allows random access from a node to
            // its containing elements, so we can avoid scanning the whole element file.

            // Form a set of all the element indices we are going to own 
            // (union of the sets from the lines in the NCL file)
            for ( std::set<unsigned>::iterator iter=rNodesOwned.begin();
                  iter!=rNodesOwned.end();
                  ++iter )
            {
                std::vector<unsigned> containing_elements = rMeshReader.GetContainingElementIndices( *iter );
                rElementsOwned.insert( containing_elements.begin(), containing_elements.end() );
            }
            
            // Iterate through that set rather than mTotalNumElements (knowing that we own a least one node in each line)
            // Then read all the data into a node_index set
            std::set<unsigned> node_index_set;
            
            for ( std::set<unsigned>::iterator iter=rElementsOwned.begin();
                  iter!=rElementsOwned.end();
                  ++iter )
            {
                ElementData element_data = rMeshReader.GetElementData( *iter );
                node_index_set.insert( element_data.NodeIndices.begin(), element_data.NodeIndices.end() );
            }
            
            // Subtract off the rNodesOwned set to produce rHaloNodesOwned.
            // Note that rNodesOwned is a subset of node_index_set.
            // std::set_difference can't be used to fill a set...
            // This is a hand-rolled ordered merge over the two sorted sets.
            std::set<unsigned>::iterator iter_all = node_index_set.begin();
            std::set<unsigned>::iterator iter_owned = rNodesOwned.begin();
            while (iter_all != node_index_set.end() && iter_owned != rNodesOwned.end())
            {
                if (*iter_all < *iter_owned) // Elements in sets are ordered
                {
                    rHaloNodesOwned.insert(*iter_all++); // This node doesn't appear in rNodesOwned
                }
                else
                {
                    // *iter_all == *iter_owned (rNodesOwned is a subset, so never '>'): advance both
                    iter_all++;
                    iter_owned++;
                }
            }
            rHaloNodesOwned.insert(iter_all, node_index_set.end()); // Anything left over is halo
        }
        else
        {
            // No NCL file: sequentially scan every element in the mesh file
            for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
            {
                ElementData element_data = rMeshReader.GetNextElementData();
    
                bool element_owned = false;
                std::set<unsigned> temp_halo_nodes;
    
                for (unsigned i=0; i<ELEMENT_DIM+1; i++)
                {
                    if (rNodesOwned.find(element_data.NodeIndices[i]) != rNodesOwned.end())
                    {
                        // At least one node owned => we own the element
                        element_owned = true;
                        rElementsOwned.insert(element_number);
                    }
                    else
                    {
                        // Candidate halo node: only counts if the element turns out to be owned
                        temp_halo_nodes.insert(element_data.NodeIndices[i]);
                    }
                }
    
                if (element_owned)
                {
                    rHaloNodesOwned.insert(temp_halo_nodes.begin(), temp_halo_nodes.end());
                }
            }
        }
        
        if (mMetisPartitioning==DistributedTetrahedralMeshPartitionType::PETSC_MAT_PARTITION && PetscTools::IsParallel())
        {
            // Timing instrumentation for the PETSc-based partitioner only
            PetscTools::Barrier();
            if(PetscTools::AmMaster())
            {
                Timer::PrintAndReset("Element and halo node assignation");
            }
        }        
        
    }
    // Leave the reader ready for a fresh pass (ConstructFromMeshReader reads it again)
    rMeshReader.Reset();
}
00206 
/**
 * Build the distributed mesh from a mesh reader: partition the mesh, then load only
 * the locally owned (and halo) nodes, elements and boundary elements on each process.
 *
 * Collective: must be called by every process. A bad face (one whose nodes never
 * appear in any element) raises an exception that is replicated to all processes
 * to avoid deadlock.
 *
 * @param rMeshReader  the mesh reader; Reset() before returning
 */
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructFromMeshReader(
    AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader)
{
    std::set<unsigned> nodes_owned;
    std::set<unsigned> halo_nodes_owned;
    std::set<unsigned> elements_owned;
    std::vector<unsigned> proc_offsets;//(PetscTools::GetNumProcs());

    // Cache the global mesh sizes reported by the reader
    this->mMeshFileBaseName = rMeshReader.GetMeshFileBaseName();
    mTotalNumElements = rMeshReader.GetNumElements();
    mTotalNumBoundaryElements = rMeshReader.GetNumFaces();
    mTotalNumNodes = rMeshReader.GetNumNodes();


    PetscTools::Barrier();
    Timer::Reset();
    ComputeMeshPartitioning(rMeshReader, nodes_owned, halo_nodes_owned, elements_owned, proc_offsets);
    PetscTools::Barrier();
    //Timer::Print("partitioning");

    // Reserve memory
    this->mElements.reserve(elements_owned.size());
    this->mNodes.reserve(nodes_owned.size());

    if ( rMeshReader.IsFileFormatBinary() )
    {
        std::vector<double> coords;
        // Binary : load only the nodes which are needed
        for (std::set<unsigned>::const_iterator it=nodes_owned.begin(); it!=nodes_owned.end(); it++)
        {
            //Loop over wholly-owned nodes
            unsigned global_node_index=*it;
            coords = rMeshReader.GetNode(global_node_index);
            RegisterNode(global_node_index);
            Node<SPACE_DIM>* p_node =  new Node<SPACE_DIM>(global_node_index, coords, false);

//Node attributes in binary format are not yet supported, see #1730
//            for (unsigned i = 0; i < rMeshReader.GetNodeAttributes().size(); i++)
//            {
//                double attribute = rMeshReader.GetNodeAttributes()[i];
//                p_node->AddNodeAttribute(attribute);
//            }

            this->mNodes.push_back(p_node);
        }
        for (std::set<unsigned>::const_iterator it=halo_nodes_owned.begin(); it!=halo_nodes_owned.end(); it++)
        {
            //Loop over halo-owned nodes
            unsigned global_node_index=*it;
            coords = rMeshReader.GetNode(global_node_index);
            RegisterHaloNode(global_node_index);
            mHaloNodes.push_back(new Node<SPACE_DIM>(global_node_index, coords, false));
        }
    }
    else
    {
        // Ascii : Sequentially load all the nodes and store those owned (or halo-owned) by the process
        for (unsigned node_index=0; node_index < mTotalNumNodes; node_index++)
        {
            std::vector<double> coords;
            coords = rMeshReader.GetNextNode();

            // The node is owned by the processor
            if (nodes_owned.find(node_index) != nodes_owned.end())
            {
                RegisterNode(node_index);
                Node<SPACE_DIM>* p_node =  new Node<SPACE_DIM>(node_index, coords, false);

                for (unsigned i = 0; i < rMeshReader.GetNodeAttributes().size(); i++)
                {
                    double attribute = rMeshReader.GetNodeAttributes()[i];
                    p_node->AddNodeAttribute(attribute);
                }

                this->mNodes.push_back(p_node);
            }

            // The node is a halo node in this processor
            if (halo_nodes_owned.find(node_index) != halo_nodes_owned.end())
            {
                RegisterHaloNode(node_index);
                mHaloNodes.push_back(new Node<SPACE_DIM>(node_index, coords, false));
            }
        }
    }

    if ( rMeshReader.IsFileFormatBinary() )
    {
        // Binary format, we loop only over the elements we have been assigned
        for (std::set<unsigned>::const_iterator elem_it=elements_owned.begin(); elem_it!=elements_owned.end(); elem_it++)
        {
            unsigned global_element_index=*elem_it;
            ElementData element_data;
            element_data = rMeshReader.GetElementData(global_element_index);

            std::vector<Node<SPACE_DIM>*> nodes;
            for (unsigned j=0; j<ELEMENT_DIM+1; j++)
            {
                //because we have populated mNodes and mHaloNodes above, we can now use this method, which should never throw
                nodes.push_back(this->GetNodeOrHaloNode(element_data.NodeIndices[j]));
            }

            RegisterElement(global_element_index);
            Element<ELEMENT_DIM,SPACE_DIM>* p_element = new Element<ELEMENT_DIM,SPACE_DIM>(global_element_index, nodes);
            this->mElements.push_back(p_element);

            if (rMeshReader.GetNumElementAttributes() > 0)
            {
                // At most one attribute per element is supported; it becomes the element's region
                assert(rMeshReader.GetNumElementAttributes() == 1);
                unsigned attribute_value = element_data.AttributeValue;
                p_element->SetRegion(attribute_value);
            }
        }
    }
    else
    {
        // Load the elements owned by the processor
        for (unsigned element_index=0; element_index < mTotalNumElements; element_index++)
        {
            ElementData element_data;

            element_data = rMeshReader.GetNextElementData();

            // The element is owned by the processor
            if (elements_owned.find(element_index) != elements_owned.end())
            {
                std::vector<Node<SPACE_DIM>*> nodes;
                for (unsigned j=0; j<ELEMENT_DIM+1; j++)
                {
                    //because we have populated mNodes and mHaloNodes above, we can now use this method, which should never throw
                    nodes.push_back(this->GetNodeOrHaloNode(element_data.NodeIndices[j]));
                }

                RegisterElement(element_index);

                Element<ELEMENT_DIM,SPACE_DIM>* p_element = new Element<ELEMENT_DIM,SPACE_DIM>(element_index, nodes);
                this->mElements.push_back(p_element);

                if (rMeshReader.GetNumElementAttributes() > 0)
                {
                    // At most one attribute per element is supported; it becomes the element's region
                    assert(rMeshReader.GetNumElementAttributes() == 1);
                    unsigned attribute_value = element_data.AttributeValue;
                    p_element->SetRegion(attribute_value);
                }
            }
        }
    }

    // Boundary nodes and elements
    try
    {
        for (unsigned face_index=0; face_index<mTotalNumBoundaryElements; face_index++)
        {
            ElementData face_data = rMeshReader.GetNextFaceData();
            std::vector<unsigned> node_indices = face_data.NodeIndices;

            bool own = false;

            for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
            {
                // if I own this node
                if (mNodesMapping.find(node_indices[node_index]) != mNodesMapping.end())
                {
                    own = true;
                    break;
                }
            }

            if (!own)
            {
                // None of the face's nodes is fully owned here; skip this face
                continue;
            }

            // Determine if this is a boundary face
            std::set<unsigned> containing_element_indices; // Elements that contain this face
            // NOTE(review): containing_element_indices is never populated or read below;
            // it appears to be a leftover from the non-distributed implementation.
            std::vector<Node<SPACE_DIM>*> nodes;

            for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
            {
                //because we have populated mNodes and mHaloNodes above, we can now use this method,
                //which SHOULD never throw (but it does).
                try
                {
                    nodes.push_back(this->GetNodeOrHaloNode(node_indices[node_index]));
                }
                catch (Exception &e)
                {
                    // A face node that is neither owned nor halo means the face is not
                    // part of any element we know about - the mesh files are inconsistent
                    std::stringstream message;
                    message << "Face does not appear in element file (Face " << face_index << " in "<<this->mMeshFileBaseName<< ")";
                    EXCEPTION(message.str().c_str());
                }
            }

            // This is a boundary face
            // Ensure all its nodes are marked as boundary nodes
            for (unsigned j=0; j<nodes.size(); j++)
            {
                if (!nodes[j]->IsBoundaryNode())
                {
                    nodes[j]->SetAsBoundaryNode();
                    this->mBoundaryNodes.push_back(nodes[j]);
                }
                // Register the index that this boundary element will have with the node
                nodes[j]->AddBoundaryElement(face_index);
            }

            RegisterBoundaryElement(face_index);
            BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>* p_boundary_element = new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(face_index, nodes);
            this->mBoundaryElements.push_back(p_boundary_element);

            if (rMeshReader.GetNumFaceAttributes() > 0)
            {
                // At most one attribute per face is supported; it becomes the face's region
                assert(rMeshReader.GetNumFaceAttributes() == 1);
                unsigned attribute_value = face_data.AttributeValue;
                p_boundary_element->SetRegion(attribute_value);
            }
        }
    }
    catch (Exception &e)
    {
        // Tell the other processes we failed so that they also throw (avoiding deadlock)
        PetscTools::ReplicateException(true); //Bad face exception
        throw e;
    }
    //EXCEPTION("before deadlocking");
//    std::cout << "before!" << std::flush <<std::endl;
    // Collective check: throws here if any other process hit a bad face above
    PetscTools::ReplicateException(false);
//    std::cout << "went past this!" << std::flush <<std::endl;

    if (mMetisPartitioning != DistributedTetrahedralMeshPartitionType::DUMB && PetscTools::IsParallel())
    {
        assert(this->mNodesPermutation.size() != 0);
        // We reorder so that each process owns a contiguous set of the indices and we can then build a distributed vector factory.
        ReorderNodes();

        unsigned num_owned;
        unsigned rank = PetscTools::GetMyRank();
        if ( !PetscTools::AmTopMost() )
        {
            num_owned =  proc_offsets[rank+1]-proc_offsets[rank];
        }
        else
        {
            // Last process owns everything from its offset to the end
            num_owned = mTotalNumNodes - proc_offsets[rank];
        }

        assert(!this->mpDistributedVectorFactory);
        this->mpDistributedVectorFactory = new DistributedVectorFactory(this->GetNumNodes(), num_owned);
    }
    else
    {
        // Dumb or sequential partition: DumbNodePartitioning already made the factory
        assert(this->mpDistributedVectorFactory);
    }
    rMeshReader.Reset();
}
00464 
00465 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00466 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalNodes() const
00467 {
00468     return this->mNodes.size();
00469 }
00470 
00471 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00472 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumHaloNodes() const
00473 {
00474     return this->mHaloNodes.size();
00475 }
00476 
00477 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00478 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalElements() const
00479 {
00480     return this->mElements.size();
00481 }
00482 
00483 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00484 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalBoundaryElements() const
00485 {
00486     return this->mBoundaryElements.size();
00487 }
00488 
00489 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00490 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumNodes() const
00491 {
00492     return mTotalNumNodes;
00493 }
00494 
00495 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00496 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumAllNodes() const
00497 {
00498     return mTotalNumNodes;
00499 }
00500 
00501 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00502 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumElements() const
00503 {
00504     return mTotalNumElements;
00505 }
00506 
00507 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00508 DistributedTetrahedralMeshPartitionType::type DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetPartitionType() const
00509 {
00510     return mMetisPartitioning;
00511 }
00512 
00513 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00514 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumBoundaryElements() const
00515 {
00516     return mTotalNumBoundaryElements;
00517 }
00518 
00519 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00520 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetHaloNodeIndices(std::vector<unsigned>& rHaloIndices) const
00521 {
00522     //Make sure the output vector is empty
00523     rHaloIndices.clear();
00524     for (unsigned i=0; i<mHaloNodes.size(); i++)
00525     {
00526         rHaloIndices.push_back(mHaloNodes[i]->GetIndex());
00527     }
00528 }
00529 
/**
 * No-op override: in a distributed mesh every locally stored element is owned
 * by this process, so there is no ownership flag to compute (unlike the
 * non-distributed base implementation).
 */
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SetElementOwnerships()
{
    // All the local elements are owned by the processor (obviously...)
    //Does nothing - unlike the non-distributed version
}
00536 
00537 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00538 bool DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfElement( unsigned elementIndex )
00539 {
00540     try
00541     {
00542         return(AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfElement(elementIndex));
00543     }
00544     catch(Exception& e)      // we don't own the element
00545     {
00546         return false;
00547     }
00548 }
00549 
00550 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00551 bool DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfBoundaryElement( unsigned faceIndex )
00552 {
00553     try
00554     {
00555         return(AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfBoundaryElement(faceIndex));
00556     }
00557     catch(Exception& e)      //  we don't own the face
00558     {
00559         return false;
00560     }
00561 }
00562 
00563 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00564 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterNode(unsigned index)
00565 {
00566     mNodesMapping[index] = this->mNodes.size();
00567 }
00568 
00569 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00570 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterHaloNode(unsigned index)
00571 {
00572     mHaloNodesMapping[index] = mHaloNodes.size();
00573 }
00574 
00575 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00576 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterElement(unsigned index)
00577 {
00578     mElementsMapping[index] = this->mElements.size();
00579 }
00580 
00581 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00582 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterBoundaryElement(unsigned index)
00583 {
00584     mBoundaryElementsMapping[index] = this->mBoundaryElements.size();
00585 }
00586 
00587 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00588 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveNodeMapping(unsigned index) const
00589 {
00590     std::map<unsigned, unsigned>::const_iterator node_position = mNodesMapping.find(index);
00591 
00592     if (node_position == mNodesMapping.end())
00593     {
00594         std::stringstream message;
00595         message << "Requested node " << index << " does not belong to processor " << PetscTools::GetMyRank();
00596         EXCEPTION(message.str().c_str());
00597     }
00598     return node_position->second;
00599 }
00600 
00601 //template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00602 //unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveHaloNodeMapping(unsigned index)
00603 //{
00604 //    assert(mHaloNodesMapping.find(index) != mHaloNodesMapping.end());
00605 //    return mHaloNodesMapping[index];
00606 //}
00607 
00608 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00609 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveElementMapping(unsigned index) const
00610 {
00611     std::map<unsigned, unsigned>::const_iterator element_position = mElementsMapping.find(index);
00612 
00613     if (element_position == mElementsMapping.end())
00614     {
00615         std::stringstream message;
00616         message << "Requested element " << index << " does not belong to processor " << PetscTools::GetMyRank();
00617         EXCEPTION(message.str().c_str());
00618     }
00619 
00620     return element_position->second;
00621 }
00622 
00623 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00624 unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveBoundaryElementMapping(unsigned index) const
00625 {
00626     std::map<unsigned, unsigned>::const_iterator boundary_element_position = mBoundaryElementsMapping.find(index);
00627 
00628     if (boundary_element_position == mBoundaryElementsMapping.end())
00629     {
00630         std::stringstream message;
00631         message << "Requested boundary element " << index << " does not belong to processor " << PetscTools::GetMyRank();
00632         EXCEPTION(message.str().c_str());
00633     }
00634 
00635     return boundary_element_position->second;
00636 }
00637 
00638 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00639 Node<SPACE_DIM> * DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNodeOrHaloNode(unsigned index) const
00640 {
00641     std::map<unsigned, unsigned>::const_iterator node_position;
00642     //First search the halo
00643     if ((node_position=mHaloNodesMapping.find(index)) != mHaloNodesMapping.end())
00644     {
00645         return mHaloNodes[node_position->second];
00646     }
00647     //First search the owned node
00648     if ((node_position=mNodesMapping.find(index)) != mNodesMapping.end())
00649     {
00650         //Found an owned node
00651         return this->mNodes[node_position->second];
00652     }
00653     //Not here
00654     std::stringstream message;
00655     message << "Requested node/halo " << index << " does not belong to processor " << PetscTools::GetMyRank();
00656     EXCEPTION(message.str().c_str());
00657 }
00658 
00659 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00660 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::DumbNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
00661                                                                               std::set<unsigned>& rNodesOwned)
00662 {
00663     if (this->mpDistributedVectorFactory)
00664     {
00665         // A distribution is given by the factory
00666         if (this->mpDistributedVectorFactory->GetProblemSize() != mTotalNumNodes)
00667         {
00668             // Reset stuff
00669             this->mpDistributedVectorFactory = NULL;
00670             this->mTotalNumNodes = 0u;
00671             this->mTotalNumElements = 0u;
00672             this->mTotalNumBoundaryElements = 0u;
00673             EXCEPTION("The distributed vector factory size in the mesh doesn't match the total number of nodes.");
00674         }
00675     }
00676     else
00677     {
00678         this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes);
00679     }
00680     for (unsigned node_index = this->mpDistributedVectorFactory->GetLow();
00681          node_index < this->mpDistributedVectorFactory->GetHigh();
00682          node_index++)
00683     {
00684          rNodesOwned.insert(node_index);
00685     }
00686 }
00687 
00688 
00689 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
00690 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::PetscMatrixPartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
00691                                                                                  std::set<unsigned>& rNodesOwned,
00692                                                                                  std::vector<unsigned>& rProcessorsOffset)
00693 {
00694     assert(PetscTools::IsParallel());
00695     assert(ELEMENT_DIM==2 || ELEMENT_DIM==3); // Metis works with triangles and tetras
00696 
00697     unsigned num_nodes = rMeshReader.GetNumNodes();
00698     PetscInt num_procs = PetscTools::GetNumProcs();
00699     unsigned local_proc_index = PetscTools::GetMyRank();
00700 
00701     unsigned num_elements = rMeshReader.GetNumElements();
00702     unsigned num_local_elements = num_elements / num_procs;
00703     unsigned first_local_element = num_local_elements * local_proc_index;
00704     if (PetscTools::AmTopMost())
00705     {
00706         // Take the excess elements
00707         num_local_elements += num_elements - (num_local_elements * num_procs);
00708     }
00709 
00710     PetscTools::Barrier();
00711     Timer::Reset();    
00712 
00713     /*
00714      * Create PETSc matrix which will have 1 for adjacent nodes.
00715      */
00716     Mat connectivity_matrix;
00718     PetscTools::SetupMat(connectivity_matrix, num_nodes, num_nodes, 54, PETSC_DECIDE, PETSC_DECIDE, false);
00719     
00720     if ( ! rMeshReader.IsFileFormatBinary() )
00721     {        
00722         // Advance the file pointer to the first element I own
00723         for (unsigned element_index = 0; element_index < first_local_element; element_index++)
00724         {
00725             ElementData element_data = rMeshReader.GetNextElementData();
00726         }
00727     }
00728 
00729     // In the loop below we assume that there exist edges between any pair of nodes in an element. This is
00730     // a reasonable assumption for triangles and tetrahedra. This won't be the case for squares or hexahedra 
00731     // (or higher order elements). We allow ELEMENT_DIM smaller than SPACE_DIM in case this is a 2D mesh in
00732     // a 3D space. 
00733     assert(SPACE_DIM >= ELEMENT_DIM);
00734     
00735     for (unsigned element_index = 0; element_index < num_local_elements; element_index++)
00736     {
00737         ElementData element_data;
00738         
00739         if ( rMeshReader.IsFileFormatBinary() )
00740         {
00741             element_data = rMeshReader.GetElementData(first_local_element + element_index);
00742         }
00743         else
00744         {
00745             element_data = rMeshReader.GetNextElementData();
00746         }        
00747 
00748         for (unsigned i=0; i<ELEMENT_DIM+1; i++)
00749         {
00750             for (unsigned j=0; j<i; j++)
00751             {
00752                 unsigned row = element_data.NodeIndices[i];
00753                 unsigned col = element_data.NodeIndices[j];
00754                 MatSetValue(connectivity_matrix, row, col, 1.0, INSERT_VALUES);
00755                 MatSetValue(connectivity_matrix, col, row, 1.0, INSERT_VALUES);
00756             }
00757         }
00758     }
00760     MatAssemblyBegin(connectivity_matrix, MAT_FINAL_ASSEMBLY);
00761     MatAssemblyEnd(connectivity_matrix, MAT_FINAL_ASSEMBLY);
00762 
00763     PetscTools::Barrier();
00764     if(PetscTools::AmMaster())
00765     {
00766         Timer::PrintAndReset("Connectivity matrix assembly");
00767     }    
00768 
00769     rMeshReader.Reset();   
00770 
00771     PetscInt connectivity_matrix_lo;
00772     PetscInt connectivity_matrix_hi;
00773     MatGetOwnershipRange(connectivity_matrix, &connectivity_matrix_lo, &connectivity_matrix_hi);
00774 
00775     unsigned num_local_nodes = connectivity_matrix_hi - connectivity_matrix_lo;
00776 
00777     MatInfo matrix_info;
00778     MatGetInfo(connectivity_matrix, MAT_LOCAL, &matrix_info);
00779     unsigned local_num_nz = (unsigned) matrix_info.nz_used;
00780 
00781     size_t size = (num_local_nodes+1)*sizeof(PetscInt);
00782     void* ptr;
00783     PetscMalloc(size, &ptr);
00784     PetscInt* local_ia = (PetscInt*) ptr;
00785     size = local_num_nz*sizeof(PetscInt);
00786     PetscMalloc(size, &ptr);
00787     PetscInt* local_ja = (PetscInt*) ptr;
00788 
00789     PetscInt row_num_nz;
00790     const PetscInt* column_indices;
00791         
00792     local_ia[0]=0;
00793     for (PetscInt row_global_index=connectivity_matrix_lo; row_global_index<connectivity_matrix_hi; row_global_index++) 
00794     {
00795         MatGetRow(connectivity_matrix, row_global_index, &row_num_nz, &column_indices, PETSC_NULL);
00796 
00797         unsigned row_local_index = row_global_index - connectivity_matrix_lo;       
00798         local_ia[row_local_index+1] = local_ia[row_local_index] + row_num_nz;        
00799         for (PetscInt col_index=0; col_index<row_num_nz; col_index++)
00800         {
00801            local_ja[local_ia[row_local_index] + col_index] =  column_indices[col_index];
00802         }        
00803 
00804         MatRestoreRow(connectivity_matrix, row_global_index, &row_num_nz,&column_indices, PETSC_NULL);
00805     }
00806 
00807     MatDestroy(connectivity_matrix);
00808 
00809     // Convert to an adjacency matrix
00810     Mat adj_matrix;
00811     MatCreateMPIAdj(PETSC_COMM_WORLD, num_local_nodes, num_nodes, local_ia, local_ja, PETSC_NULL, &adj_matrix);
00812 
00813     PetscTools::Barrier();
00814     if(PetscTools::AmMaster())
00815     {
00816         Timer::PrintAndReset("Adjacency matrix creation");
00817     }    
00818 
00819     // Get PETSc to call ParMETIS
00820     MatPartitioning part;
00821     MatPartitioningCreate(PETSC_COMM_WORLD, &part);
00822     MatPartitioningSetAdjacency(part, adj_matrix);
00823     MatPartitioningSetFromOptions(part);
00824     IS new_process_numbers;
00825     MatPartitioningApply(part, &new_process_numbers);
00826     MatPartitioningDestroy(part);
00827     
00829     MatDestroy(adj_matrix);
00830 
00831     PetscTools::Barrier();
00832     if(PetscTools::AmMaster())
00833     {
00834         Timer::PrintAndReset("PETSc-ParMETIS call");
00835     }    
00836                 
00837     // Figure out who owns what - processor offsets
00838     PetscInt* num_nodes_per_process = new PetscInt[num_procs];        
00839 #if (PETSC_VERSION_MAJOR == 3) //PETSc 3.x.x
00840     ISPartitioningCount(new_process_numbers, num_procs, num_nodes_per_process);
00841 #else
00842     ISPartitioningCount(new_process_numbers, num_nodes_per_process);
00843 #endif
00844        
00845     rProcessorsOffset.resize(num_procs);
00846     rProcessorsOffset[0] = 0;
00847     for (PetscInt i=1; i<num_procs; i++)
00848     {
00849         rProcessorsOffset[i] = rProcessorsOffset[i-1] + num_nodes_per_process[i-1];
00850     }
00851     unsigned my_num_nodes = num_nodes_per_process[local_proc_index];
00852     delete[] num_nodes_per_process;
00853     
00854     // Figure out who owns what - new node numbering
00855     IS new_global_node_indices;
00856     ISPartitioningToNumbering(new_process_numbers, &new_global_node_indices);
00857     
00858     // Index sets only give local information, we want global
00859     AO ordering;
00860     AOCreateBasicIS(new_global_node_indices, PETSC_NULL /* natural ordering */, &ordering);
00861     
00862     // The locally owned range under the new numbering
00863     PetscInt* local_range = new PetscInt[my_num_nodes];
00864     for (unsigned i=0; i<my_num_nodes; i++)
00865     {
00866         local_range[i] = rProcessorsOffset[local_proc_index] + i;
00867     }
00868     AOApplicationToPetsc(ordering, my_num_nodes, local_range);    
00869     //AOView(ordering, PETSC_VIEWER_STDOUT_WORLD);
00870         
00871     // Fill in rNodesOwned (TODO: do something smarter with iterators...)
00872     for (unsigned i=0; i<my_num_nodes; i++)
00873     {
00874         rNodesOwned.insert(local_range[i]);
00875     }   
00876     delete[] local_range;    
00877     
00878     /*
00879      *  Once we know the offsets we can compute the permutation vector
00880      */
00881     PetscInt* global_range = new PetscInt[num_nodes];
00882     for (unsigned i=0; i<num_nodes; i++)
00883     {
00884         global_range[i] = i;
00885     }
00886     AOPetscToApplication(ordering, num_nodes, global_range);
00887 
00888     this->mNodesPermutation.resize(num_nodes);
00889 
00890     for (unsigned node_index=0; node_index<num_nodes; node_index++)
00891     {
00892         this->mNodesPermutation[node_index] = global_range[node_index];
00893     }
00894     delete[] global_range;
00895 
00896     AODestroy(ordering);
00897     ISDestroy(new_process_numbers);
00898     ISDestroy(new_global_node_indices);
00899     
00900     PetscTools::Barrier();
00901     if(PetscTools::AmMaster())
00902     {
00903         Timer::PrintAndReset("PETSc-ParMETIS output manipulation");
00904     }    
00905 }
00906 
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
/**
 * Partition the mesh nodes across processes by calling the METIS library directly
 * (METIS_PartMeshNodal) on the master process, then broadcasting the result.
 *
 * On exit:
 *  - rNodesOwned contains the global indices of the nodes METIS assigned to this process;
 *  - rProcessorsOffset[p] is the index (in the permuted numbering) of the first node owned
 *    by process p;
 *  - this->mNodesPermutation maps each original node index to its new index, such that
 *    each process's nodes are contiguous in the new numbering.
 *
 * @param rMeshReader        reader supplying element connectivity (master only reads it)
 * @param rNodesOwned        output set of locally-owned node indices (filled in here)
 * @param rProcessorsOffset  output per-process offsets; must be passed in empty
 */
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::MetisLibraryNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
                                                                                  std::set<unsigned>& rNodesOwned,
                                                                                  std::vector<unsigned>& rProcessorsOffset)
{
    assert(PetscTools::IsParallel());

    assert(ELEMENT_DIM==2 || ELEMENT_DIM==3); // Metis works with triangles and tetras

    int nn = rMeshReader.GetNumNodes();
    // npart[i] will hold the process METIS assigns node i to; filled by METIS on the
    // master and then broadcast to every process below.
    idxtype* npart = new idxtype[nn];
    assert(npart != NULL);

    //Only the master process will access the element data and perform the partitioning
    if (PetscTools::AmMaster())
    {
        int ne = rMeshReader.GetNumElements();
        // Flattened element connectivity in the layout METIS expects:
        // (ELEMENT_DIM+1) node indices per element, elements back-to-back.
        idxtype* elmnts = new idxtype[ne * (ELEMENT_DIM+1)];
        assert(elmnts != NULL);

        unsigned counter=0;
        for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
        {
            ElementData element_data = rMeshReader.GetNextElementData();

            for (unsigned i=0; i<ELEMENT_DIM+1; i++)
            {
                elmnts[counter++] = element_data.NodeIndices[i];
            }
        }
        // Rewind the reader so later mesh-construction code can re-read the elements
        rMeshReader.Reset();

        int etype;

        switch (ELEMENT_DIM)
        {
            case 2:
                etype = 1; //1 is Metis speak for triangles
                break;
            case 3:
                etype = 2; //2 is Metis speak for tetrahedra
                break;
            default:
                NEVER_REACHED;
        }

        int numflag = 0; //0 means C-style numbering is assumed
        int nparts = PetscTools::GetNumProcs();
        int edgecut;
        // Element-to-process assignment; computed by METIS but not used here
        idxtype* epart = new idxtype[ne];
        assert(epart != NULL);

        Timer::Reset();
        METIS_PartMeshNodal(&ne, &nn, elmnts, &etype, &numflag, &nparts, &edgecut, epart, npart);//, wgetflag, vwgt);
        //Timer::Print("METIS call");

        delete[] elmnts;
        delete[] epart;
    }

    //Here's the new bottle-neck: share all the node ownership data
    //idxtype is normally int (see metis-4.0/Lib/struct.h 17-22)
    assert(sizeof(idxtype) == sizeof(int));
    MPI_Bcast(npart /*data*/, nn /*size*/, MPI_INT, 0 /*From Master*/, PETSC_COMM_WORLD);

    assert(rProcessorsOffset.size() == 0); // Making sure the vector is empty. After calling resize() only newly created memory will be initialised to 0.
    rProcessorsOffset.resize(PetscTools::GetNumProcs(), 0);

    for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
    {
        unsigned part_read = npart[node_index];

        // METIS output says I own this node
        if (part_read == PetscTools::GetMyRank())
        {
            rNodesOwned.insert(node_index);
        }

        // Offset is defined as the first node owned by a processor. We compute it incrementally.
        // i.e. if node_index belongs to proc 3 (of 6) we have to shift the processors 4, 5, and 6
        // offset a position.
        for (unsigned proc=part_read+1; proc<PetscTools::GetNumProcs(); proc++)
        {
            rProcessorsOffset[proc]++;
        }
    }

    /*
     *  Once we know the offsets we can compute the permutation vector
     */
    // local_index[p] counts how many of process p's nodes have been numbered so far,
    // so each node gets the next free slot within its owner's contiguous range.
    std::vector<unsigned> local_index(PetscTools::GetNumProcs(), 0);

    this->mNodesPermutation.resize(this->GetNumNodes());

    for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
    {
        unsigned part_read = npart[node_index];

        this->mNodesPermutation[node_index] = rProcessorsOffset[part_read] + local_index[part_read];

        local_index[part_read]++;
    }

    delete[] npart;
}
01012 
01013 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
01014 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ReorderNodes()
01015 {
01016     assert(PetscTools::IsParallel());
01017 
01018     // Need to rebuild global-local maps
01019     mNodesMapping.clear();
01020     mHaloNodesMapping.clear();
01021 
01022     // Update indices
01023     for (unsigned index=0; index<this->mNodes.size(); index++)
01024     {
01025         unsigned old_index = this->mNodes[index]->GetIndex();
01026         unsigned new_index = this->mNodesPermutation[old_index];
01027 
01028         this->mNodes[index]->SetIndex(new_index);
01029         mNodesMapping[new_index] = index;
01030     }
01031 
01032     for (unsigned index=0; index<mHaloNodes.size(); index++)
01033     {
01034         unsigned old_index = mHaloNodes[index]->GetIndex();
01035         unsigned new_index = this->mNodesPermutation[old_index];
01036 
01037         mHaloNodes[index]->SetIndex(new_index);
01038         mHaloNodesMapping[new_index] = index;
01039     }
01040 }
01041 
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
/**
 * Construct a 1D mesh of `width` elements (width+1 nodes) on the unit-spaced
 * points 0..width, distributed across processes with a dumb (slab) partition.
 * Each process creates its own nodes plus up to one halo node on each side,
 * the elements joining them, and the two boundary elements at the ends
 * (owned by the master and top-most contributing process respectively).
 *
 * @param width number of elements; must be at least 1
 */
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructLinearMesh(unsigned width)
{
    assert(ELEMENT_DIM == 1);

     //Check that there are enough nodes to make the parallelisation worthwhile
    if (width==0)
    {
        EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
    }
    //Use dumb partition so that archiving doesn't permute anything
    mMetisPartitioning=DistributedTetrahedralMeshPartitionType::DUMB;
    mTotalNumNodes=width+1;
    mTotalNumBoundaryElements=2u;
    mTotalNumElements=width;

    //Use DistributedVectorFactory to make a dumb partition of the nodes
    assert(!this->mpDistributedVectorFactory);
    this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes);
    if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
    {
        //It's a short mesh and this process owns no nodes
        return;
    }

    /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
     * higher numbered process may have dropped out of this construction altogether
     * (because is has no local ownership)
     */
    bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);

    unsigned lo_node=this->mpDistributedVectorFactory->GetLow();
    unsigned hi_node=this->mpDistributedVectorFactory->GetHigh();
    if (!PetscTools::AmMaster())
    {
        //Allow for a halo node
        lo_node--;
    }
    if (!am_top_most)
    {
        //Allow for a halo node
        hi_node++;
    }
    // Tracks the previously-created node so each new node can be joined to it by an element
    Node<SPACE_DIM>* p_old_node=NULL;
    for (unsigned node_index=lo_node; node_index<hi_node; node_index++)
    {
        // create node or halo-node; a node is a boundary node iff it is at either end
        Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(node_index, node_index==0 || node_index==width, node_index);
        if (node_index<this->mpDistributedVectorFactory->GetLow() ||
            node_index==this->mpDistributedVectorFactory->GetHigh() )
        {
            //Beyond left or right it's a halo node
            RegisterHaloNode(node_index);
            mHaloNodes.push_back(p_node);
        }
        else
        {
            RegisterNode(node_index);
            this->mNodes.push_back(p_node); // create node

            //A boundary face has to be wholely owned by the process
            //Since, when ELEMENT_DIM>1 we have *at least* one boundary node as a non-halo
            if (node_index==0) // create left boundary node and boundary element
            {
                this->mBoundaryNodes.push_back(p_node);
                RegisterBoundaryElement(0);
                this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(0, p_node) );
            }
            if (node_index==width) // create right boundary node and boundary element
            {
                this->mBoundaryNodes.push_back(p_node);
                RegisterBoundaryElement(1);
                this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(1, p_node) );
            }
            }
        if (node_index>lo_node) // create element joining the previous node to this one
        {
            std::vector<Node<SPACE_DIM>*> nodes;
            nodes.push_back(p_old_node);
            nodes.push_back(p_node);
            RegisterElement(node_index-1);
            this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(node_index-1, nodes) );
        }
        //Keep track of the node which we've just created
        p_old_node=p_node;
    }
}
01129 
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
/**
 * Construct a 2D rectangular mesh of width x height unit squares, each split into
 * two triangles, distributed with a dumb partition of complete horizontal slices.
 * Each process builds its own rows of nodes plus a halo row above and/or below,
 * the triangles within those rows, and the boundary (edge) elements it owns.
 *
 * @param width   number of squares across (x direction)
 * @param height  number of squares up (y direction); must be at least 1
 * @param stagger if true, alternate the diagonal direction of each square
 *                according to a checkerboard parity
 */
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructRectangularMesh(unsigned width, unsigned height, bool stagger)
{
    assert(SPACE_DIM == 2);
    assert(ELEMENT_DIM == 2);
    //Check that there are enough nodes to make the parallelisation worthwhile
    if (height==0)
    {
        EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
    }
    //Use dumb partition so that archiving doesn't permute anything
    mMetisPartitioning=DistributedTetrahedralMeshPartitionType::DUMB;

    mTotalNumNodes=(width+1)*(height+1);
    mTotalNumBoundaryElements=(width+height)*2;
    mTotalNumElements=width*height*2;

    //Use DistributedVectorFactory to make a dumb partition of space
    DistributedVectorFactory y_partition(height+1);
    unsigned lo_y = y_partition.GetLow();
    unsigned hi_y = y_partition.GetHigh();
    //Dumb partition of nodes has to be such that each process gets complete slices
    assert(!this->mpDistributedVectorFactory);
    this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes, (width+1)*y_partition.GetLocalOwnership());
    if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
    {
        //It's a short mesh and this process owns no nodes
        return;
    }
    /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
     * higher numbered process may have dropped out of this construction altogether
     * (because is has no local ownership)
     */
    bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);


    if (!PetscTools::AmMaster())
    {
        //Allow for a halo node: extend the owned slice down one row
        lo_y--;
    }
    if (!am_top_most)
    {
        //Allow for a halo node: extend the owned slice up one row
        hi_y++;
    }

    //Construct the nodes (rows lo_y..hi_y-1, including any halo rows)
    for (unsigned j=lo_y; j<hi_y; j++)
    {
        for (unsigned i=0; i<width+1; i++)
        {
            bool is_boundary=false;
            if (i==0 || j==0 || i==width || j==height)
            {
                is_boundary=true;
            }
            unsigned global_node_index=((width+1)*(j) + i); //Verified from sequential
            Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, is_boundary, i, j);
            // Rows outside this process's slice of the y-partition are halo rows
            if (j<y_partition.GetLow() || j==y_partition.GetHigh() )
            {
                //Beyond left or right it's a halo node
                RegisterHaloNode(global_node_index);
                mHaloNodes.push_back(p_node);
            }
            else
            {
                RegisterNode(global_node_index);
                this->mNodes.push_back(p_node);
            }
            if (is_boundary)
            {
                this->mBoundaryNodes.push_back(p_node);
            }
        }
    }

    //Construct the boundary elements.
    //Boundary element indices run: top 0..width-1, right width..width+height-1,
    //bottom width+height..2*width+height-1, left 2*width+height..2*(width+height)-1.
    unsigned belem_index;
    //Top (only the process owning the topmost row creates these)
    if (am_top_most)
    {
       for (unsigned i=0; i<width; i++)
       {
            std::vector<Node<SPACE_DIM>*> nodes;
            nodes.push_back(GetNodeOrHaloNode( height*(width+1)+i ));
            nodes.push_back(GetNodeOrHaloNode( height*(width+1)+i+1 ));
            belem_index=i;
            RegisterBoundaryElement(belem_index);
            this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
        }
    }

    //Right (one edge per row of squares this process covers)
    for (unsigned j=lo_y+1; j<hi_y; j++)
    {
        std::vector<Node<SPACE_DIM>*> nodes;
        nodes.push_back(GetNodeOrHaloNode( (width+1)*(j+1)-1 ));
        nodes.push_back(GetNodeOrHaloNode( (width+1)*j-1 ));
        belem_index=width+j-1;
        RegisterBoundaryElement(belem_index);
        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
    }

    //Bottom (only the master, which owns row 0, creates these)
    if (PetscTools::AmMaster())
    {
        for (unsigned i=0; i<width; i++)
        {
            std::vector<Node<SPACE_DIM>*> nodes;
            nodes.push_back(GetNodeOrHaloNode( i+1 ));
            nodes.push_back(GetNodeOrHaloNode( i ));
            belem_index=width+height+i;
            RegisterBoundaryElement(belem_index);
            this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
        }
    }

    //Left (one edge per row of squares this process covers)
    for (unsigned j=lo_y; j<hi_y-1; j++)
    {
        std::vector<Node<SPACE_DIM>*> nodes;
        nodes.push_back(GetNodeOrHaloNode( (width+1)*(j+1) ));
        nodes.push_back(GetNodeOrHaloNode( (width+1)*(j) ));
        belem_index=2*width+height+j;
        RegisterBoundaryElement(belem_index);
        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
    }


    //Construct the elements: each unit square (i,j) yields an "upper" and a "lower"
    //triangle with consecutive element indices 2*(j*width+i) and 2*(j*width+i)+1.
    unsigned elem_index;
    for (unsigned j=lo_y; j<hi_y-1; j++)
    {
        for (unsigned i=0; i<width; i++)
        {
            unsigned parity=(i+(height-j))%2;//Note that parity is measured from the top-left (not bottom left) for historical reasons
            unsigned nw=(j+1)*(width+1)+i; //ne=nw+1
            unsigned sw=(j)*(width+1)+i;   //se=sw+1
            std::vector<Node<SPACE_DIM>*> upper_nodes;
            upper_nodes.push_back(GetNodeOrHaloNode( nw ));
            upper_nodes.push_back(GetNodeOrHaloNode( nw+1 ));
            // Third vertex picks the diagonal direction; parity only matters when staggering
            if (stagger==false  || parity == 1)
            {
                upper_nodes.push_back(GetNodeOrHaloNode( sw+1 ));
            }
            else
            {
                upper_nodes.push_back(GetNodeOrHaloNode( sw ));
            }
            elem_index=2*(j*width+i);
            RegisterElement(elem_index);
            this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index,upper_nodes));
            std::vector<Node<SPACE_DIM>*> lower_nodes;
            lower_nodes.push_back(GetNodeOrHaloNode( sw+1 ));
            lower_nodes.push_back(GetNodeOrHaloNode( sw ));
            // Mirror of the upper triangle's diagonal choice
            if (stagger==false  ||parity == 1)
            {
                lower_nodes.push_back(GetNodeOrHaloNode( nw ));
            }
            else
            {
                lower_nodes.push_back(GetNodeOrHaloNode( nw+1 ));
            }
            elem_index++;
            RegisterElement(elem_index);
            this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index,lower_nodes));
        }
    }

}
01301 
01302 
01303 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
01304 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructCuboid(unsigned width,
01305         unsigned height,
01306         unsigned depth)
01307 {
01308     assert(SPACE_DIM == 3);
01309     assert(ELEMENT_DIM == 3);
01310     //Check that there are enough nodes to make the parallelisation worthwhile
01311     if (depth==0)
01312     {
01313         EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
01314     }
01315 
01316     //Use dumb partition so that archiving doesn't permute anything
01317     mMetisPartitioning=DistributedTetrahedralMeshPartitionType::DUMB;
01318 
01319     mTotalNumNodes=(width+1)*(height+1)*(depth+1);
01320     mTotalNumBoundaryElements=((width*height)+(width*depth)+(height*depth))*4;//*2 for top-bottom, *2 for tessellating each unit square
01321     mTotalNumElements=width*height*depth*6;
01322 
01323     //Use DistributedVectorFactory to make a dumb partition of space
01324     DistributedVectorFactory z_partition(depth+1);
01325     unsigned lo_z = z_partition.GetLow();
01326     unsigned hi_z = z_partition.GetHigh();
01327 
01328     //Dumb partition of nodes has to be such that each process gets complete slices
01329     assert(!this->mpDistributedVectorFactory);
01330     this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes, (width+1)*(height+1)*z_partition.GetLocalOwnership());
01331     if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
01332     {
01333 #define COVERAGE_IGNORE
01334         // It's a short mesh and this process owns no nodes.  This problem can only occur on 4 or more processes,
01335         // so we can't cover it - coverage only runs with 1 and 2 processes.
01336         std::stringstream msg;
01337         msg << "No nodes were assigned to processor " << PetscTools::GetMyRank() << " in DistributedTetrahedralMesh::ConstructCuboid()";
01338         WARNING(msg.str());
01339         return;
01340 #undef COVERAGE_IGNORE
01341     }
01342     /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
01343      * higher numbered process may have dropped out of this construction altogether
01344      * (because is has no local ownership)
01345      */
01346     bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);
01347 
01348 
01349 
01350     if (!PetscTools::AmMaster())
01351     {
01352         //Allow for a halo node
01353         lo_z--;
01354     }
01355     if (!am_top_most)
01356     {
01357         //Allow for a halo node
01358         hi_z++;
01359     }
01360 
01361     //Construct the nodes
01362     unsigned global_node_index;
01363     for (unsigned k=lo_z; k<hi_z; k++)
01364     {
01365         for (unsigned j=0; j<height+1; j++)
01366         {
01367             for (unsigned i=0; i<width+1; i++)
01368             {
01369                 bool is_boundary = false;
01370                 if (i==0 || j==0 || k==0 || i==width || j==height || k==depth)
01371                 {
01372                     is_boundary = true;
01373                 }
01374                 global_node_index = (k*(height+1)+j)*(width+1)+i;
01375 
01376                 Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, is_boundary, i, j, k);
01377 
01378                 if (k<z_partition.GetLow() || k==z_partition.GetHigh() )
01379                 {
01380                     //Beyond left or right it's a halo node
01381                     RegisterHaloNode(global_node_index);
01382                     mHaloNodes.push_back(p_node);
01383                 }
01384                 else
01385                 {
01386                     RegisterNode(global_node_index);
01387                     this->mNodes.push_back(p_node);
01388                 }
01389 
01390                 if (is_boundary)
01391                 {
01392                     this->mBoundaryNodes.push_back(p_node);
01393                 }
01394             }
01395         }
01396     }
01397 
01398     // Construct the elements
01399 
01400     unsigned element_nodes[6][4] = {{0, 1, 5, 7}, {0, 1, 3, 7},
01401                                         {0, 2, 3, 7}, {0, 2, 6, 7},
01402                                         {0, 4, 6, 7}, {0, 4, 5, 7}};
01403     std::vector<Node<SPACE_DIM>*> tetrahedra_nodes;
01404 
01405     for (unsigned k=lo_z; k<hi_z-1; k++)
01406     {
01407         unsigned belem_index = 0;
01408         if (k != 0)
01409         {
01410             // height*width squares on upper face, k layers of 2*height+2*width square aroun
01411             belem_index =   2*(height*width+k*2*(height+width));
01412         }
01413 
01414         for (unsigned j=0; j<height; j++)
01415         {
01416             for (unsigned i=0; i<width; i++)
01417             {
01418                 // Compute the nodes' index
01419                 unsigned global_node_indices[8];
01420                 unsigned local_node_index = 0;
01421 
01422                 for (unsigned z = 0; z < 2; z++)
01423                 {
01424                     for (unsigned y = 0; y < 2; y++)
01425                     {
01426                         for (unsigned x = 0; x < 2; x++)
01427                         {
01428                             global_node_indices[local_node_index] = i+x+(width+1)*(j+y+(height+1)*(k+z));
01429 
01430                             local_node_index++;
01431                         }
01432                     }
01433                 }
01434 
01435                 for (unsigned m = 0; m < 6; m++)
01436                 {
01437                     // Tetrahedra #m
01438 
01439                     tetrahedra_nodes.clear();
01440 
01441                     for (unsigned n = 0; n < 4; n++)
01442                     {
01443                         tetrahedra_nodes.push_back(GetNodeOrHaloNode( global_node_indices[element_nodes[m][n]] ));
01444                     }
01445                     unsigned elem_index = 6 * ((k*height+j)*width+i)+m;
01446                     RegisterElement(elem_index);
01447                     this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index, tetrahedra_nodes));
01448                 }
01449 
01450                 //Are we at a boundary?
01451                 std::vector<Node<SPACE_DIM>*> triangle_nodes;
01452 
01453                 if (i == 0) //low face at x==0
01454                 {
01455                     triangle_nodes.clear();
01456                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
01457                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
01458                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
01459                     RegisterBoundaryElement(belem_index);
01460                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01461                     triangle_nodes.clear();
01462                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
01463                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
01464                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
01465                     RegisterBoundaryElement(belem_index);
01466                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01467                 }
01468                 if (i == width-1) //high face at x=width
01469                 {
01470                     triangle_nodes.clear();
01471                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
01472                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
01473                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
01474                     RegisterBoundaryElement(belem_index);
01475                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01476                     triangle_nodes.clear();
01477                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
01478                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
01479                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
01480                     RegisterBoundaryElement(belem_index);
01481                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01482                 }
01483                 if (j == 0) //low face at y==0
01484                 {
01485                     triangle_nodes.clear();
01486                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
01487                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
01488                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
01489                     RegisterBoundaryElement(belem_index);
01490                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01491                     triangle_nodes.clear();
01492                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
01493                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
01494                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
01495                     RegisterBoundaryElement(belem_index);
01496                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01497                 }
01498                 if (j == height-1) //high face at y=height
01499                 {
01500                     triangle_nodes.clear();
01501                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
01502                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
01503                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
01504                     RegisterBoundaryElement(belem_index);
01505                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01506                     triangle_nodes.clear();
01507                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
01508                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
01509                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
01510                     RegisterBoundaryElement(belem_index);
01511                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01512                 }
01513                 if (k == 0) //low face at z==0
01514                 {
01515                     triangle_nodes.clear();
01516                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
01517                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
01518                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
01519                     RegisterBoundaryElement(belem_index);
01520                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01521                     triangle_nodes.clear();
01522                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
01523                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
01524                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
01525                     RegisterBoundaryElement(belem_index);
01526                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01527                 }
01528                 if (k == depth-1) //high face at z=depth
01529                 {
01530                     triangle_nodes.clear();
01531                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
01532                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
01533                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
01534                     RegisterBoundaryElement(belem_index);
01535                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01536                     triangle_nodes.clear();
01537                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
01538                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
01539                     triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
01540                     RegisterBoundaryElement(belem_index);
01541                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
01542                 }
01543             }//i
01544         }//j
01545     }//k
01546 }
01547 
01548 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
01549 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Scale(const double xFactor, const double yFactor, const double zFactor)
01550 {
01551     //Base class scale (scales node positions)
01552     AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Scale(xFactor, yFactor, zFactor);
01553     //Scales halos
01554     for (unsigned i=0; i<mHaloNodes.size(); i++)
01555     {
01556         c_vector<double, SPACE_DIM>& r_location = mHaloNodes[i]->rGetModifiableLocation();
01557         if (SPACE_DIM>=3)
01558         {
01559             r_location[2] *= zFactor;
01560         }
01561         if (SPACE_DIM>=2)
01562         {
01563             r_location[1] *= yFactor;
01564         }
01565         r_location[0] *= xFactor;
01566     }
01567 
01568 }
01569 
01570 
/**
 * Partition the mesh in rMeshReader across all processes using the ParMETIS library.
 *
 * Builds the dual graph of the element connectivity (ParMETIS_V3_Mesh2Dual), partitions
 * it (ParMETIS_V3_PartKway), then derives from the element partition: the set of elements
 * this process owns, the set of nodes it owns, the halo nodes it must also hold, the
 * per-process node offsets, and the global node permutation (this->mNodesPermutation).
 *
 * @param rMeshReader  the mesh reader (its file pointer is advanced and Reset() several times)
 * @param rElementsOwned  filled with global indices of elements owned by this process
 * @param rNodesOwned  filled with global indices of nodes owned by this process
 * @param rHaloNodesOwned  filled with global indices of halo nodes needed by this process
 * @param rProcessorsOffset  must be empty on entry; filled with the first node index owned by each process
 */
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ParMetisLibraryNodePartitioning(AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
                                                                                       std::set<unsigned>& rElementsOwned,
                                                                                       std::set<unsigned>& rNodesOwned,
                                                                                       std::set<unsigned>& rHaloNodesOwned,
                                                                                       std::vector<unsigned>& rProcessorsOffset)
{
    assert(PetscTools::IsParallel());
    assert(ELEMENT_DIM==2 || ELEMENT_DIM==3); // Metis works with triangles and tetras

    const unsigned num_elements = rMeshReader.GetNumElements();
    const unsigned num_procs = PetscTools::GetNumProcs();
    const unsigned local_proc_index = PetscTools::GetMyRank();

    /*
     *  Work out initial element distribution
     */
    // NOTE(review): variable-length arrays are a GCC extension, not standard C++ — consider std::vector<idxtype>
    idxtype element_distribution[num_procs+1];
    idxtype element_count[num_procs];

    element_distribution[0]=0;

    // Give each process floor(num_elements/num_procs) elements; the last process takes the remainder
    for (unsigned proc_index=1; proc_index<num_procs; proc_index++)
    {
        element_distribution[proc_index] = element_distribution[proc_index-1] + num_elements/num_procs;
        element_count[proc_index-1] = element_distribution[proc_index] - element_distribution[proc_index-1];
    }

    element_distribution[num_procs] = num_elements;
    element_count[num_procs-1] = element_distribution[num_procs] - element_distribution[num_procs-1];

    /*
     *  Create distributed mesh data structure
     */
    unsigned first_local_element = element_distribution[local_proc_index];
    unsigned last_plus_one_element = element_distribution[local_proc_index+1];
    unsigned num_local_elements = last_plus_one_element - first_local_element;

    // eind/eptr form the CSR-style element->node connectivity expected by ParMETIS
    idxtype* eind = new idxtype[num_local_elements*(ELEMENT_DIM+1)];
    idxtype* eptr = new idxtype[num_local_elements+1];

    if ( ! rMeshReader.IsFileFormatBinary() )
    {
        // Advance the file pointer to the first element I own.
        for (unsigned element_index = 0; element_index < first_local_element; element_index++)
        {
            ElementData element_data = rMeshReader.GetNextElementData();
        }
    }

    // Fill the connectivity arrays; binary readers support random access, ASCII readers are sequential
    unsigned counter=0;
    for (unsigned element_index = 0; element_index < num_local_elements; element_index++)
    {
        ElementData element_data;
        
        if ( rMeshReader.IsFileFormatBinary() )
        {
            element_data = rMeshReader.GetElementData(first_local_element + element_index);
        }
        else
        {
            element_data = rMeshReader.GetNextElementData();
        }        

        eptr[element_index] = counter;
        for (unsigned i=0; i<ELEMENT_DIM+1; i++)
        {
            eind[counter++] = element_data.NodeIndices[i];
        }

    }
    eptr[num_local_elements] = counter;

    rMeshReader.Reset();

    int numflag = 0; // METIS speak for C-style numbering
    int ncommonnodes = 3; // Connectivity degree. Manual recommends 3 for meshes made exclusively of tetrahedra.
    MPI_Comm communicator = PETSC_COMM_WORLD;

    // xadj/adjncy are malloc'd by ParMETIS, hence the free() (not delete[]) calls below
    idxtype* xadj;
    idxtype* adjncy;

    Timer::Reset();
    // Build the dual graph: one vertex per element, edges between elements sharing >= ncommonnodes nodes
    ParMETIS_V3_Mesh2Dual(element_distribution, eptr, eind,
                          &numflag, &ncommonnodes, &xadj, &adjncy, &communicator);
    //Timer::Print("ParMETIS Mesh2Dual");

    delete[] eind;
    delete[] eptr;

    int weight_flag = 0; // unweighted graph
    int n_constrains = 0; // number of weights that each vertex has (number of balance constrains)
    int n_subdomains = PetscTools::GetNumProcs();
    int options[3]; // extra options
    options[0] = 0; // ignore extra options
    int edgecut;

    idxtype* local_partition = new idxtype[num_local_elements];

/*
 *  In order to use ParMETIS_V3_PartGeomKway, we need to sort out how to compute the coordinates of the
 *  centers of each element efficiently.
 *
 *  In the meantime use ParMETIS_V3_PartKway.
 */
//    int n_dimensions = ELEMENT_DIM;
//    float node_coordinates[num_local_elements*SPACE_DIM];
//
//    ParMETIS_V3_PartGeomKway(element_distribution, xadj, adjncy, NULL, NULL, &weight_flag, &numflag,
//                             &n_dimensions, node_coordinates, &n_constrains, &n_subdomains, NULL, NULL,
//                             options, &edgecut, local_partition, &communicator);

    Timer::Reset();
    ParMETIS_V3_PartKway(element_distribution, xadj, adjncy, NULL, NULL, &weight_flag, &numflag,
                         &n_constrains, &n_subdomains, NULL, NULL,
                         options, &edgecut, local_partition, &communicator);
    //Timer::Print("ParMETIS PartKway");


    // Gather every process's local element partition into a full global array on all processes.
    // NOTE(review): MPI_INT here assumes idxtype is int — confirm against the ParMETIS build configuration
    idxtype* global_element_partition = new idxtype[num_elements];

    MPI_Allgatherv(local_partition, num_local_elements, MPI_INT,
                   global_element_partition, element_count, element_distribution, MPI_INT, PETSC_COMM_WORLD);

    delete[] local_partition;

    // First pass at element ownership: the elements ParMETIS assigned to this rank
    for(unsigned elem_index=0; elem_index<num_elements; elem_index++)
    {
        if ((unsigned) global_element_partition[elem_index] == local_proc_index)
        {
            rElementsOwned.insert(elem_index);
        }
    }

    rMeshReader.Reset();
    free(xadj);
    free(adjncy);

    unsigned num_nodes = rMeshReader.GetNumNodes();

    //unsigned global_node_partition[num_nodes]; // initialised to UNASSIGNED (do #define UNASSIGNED -1
    std::vector<unsigned> global_node_partition;
    global_node_partition.resize(num_nodes, UNASSIGNED_NODE);

    assert(rProcessorsOffset.size() == 0); // Making sure the vector is empty. After calling resize() only newly created memory will be initialised to 0.
    rProcessorsOffset.resize(PetscTools::GetNumProcs(), 0);


    /*
     *  Work out node distribution based on initial element distribution returned by ParMETIS
     *
     *  In this loop we handle 4 different data structures:
     *      global_node_partition and rProcessorsOffset are global,
     *      rNodesOwned and rHaloNodesOwned are local.
     */
    
    // For binary readers the elements are visited in a pseudo-random (but reproducible, seed 0)
    // order; ASCII readers only support sequential access, so use the natural order there.
    std::vector<unsigned> element_access_order;

    if ( rMeshReader.IsFileFormatBinary() )
    {     
        RandomNumberGenerator* p_gen = RandomNumberGenerator::Instance();
        p_gen->Reseed(0);
        p_gen->Shuffle(mTotalNumElements,element_access_order);
    }
    else
    {
        for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
        {
            element_access_order.push_back(element_number);
        }
    }
    
    
    for (unsigned element_count = 0; element_count < mTotalNumElements; element_count++)
    {
        unsigned element_number = element_access_order[element_count];
        unsigned element_owner = global_element_partition[element_number];
        
        ElementData element_data;
        
        if ( rMeshReader.IsFileFormatBinary() )
        {
            element_data = rMeshReader.GetElementData(element_number);
        }
        else
        {
            element_data = rMeshReader.GetNextElementData();
        }        

        for (unsigned i=0; i<ELEMENT_DIM+1; i++)
        {
            /*
             *  For each node in this element, check whether it hasn't been assigned to another processor yet.
             *  If so, assign it to the owner the element. Otherwise, consider it halo.
             */
            if( global_node_partition[element_data.NodeIndices[i]] == UNASSIGNED_NODE )
            {
                if (element_owner == local_proc_index)
                {
                    rNodesOwned.insert(element_data.NodeIndices[i]);
                }

                global_node_partition[element_data.NodeIndices[i]] = element_owner;

                // Offset is defined as the first node owned by a processor. We compute it incrementally.
                // i.e. if node_index belongs to proc 3 (of 6) we have to shift the processors 4, 5, and 6
                // offset a position.
                for (unsigned proc=element_owner+1; proc<PetscTools::GetNumProcs(); proc++)
                {
                    rProcessorsOffset[proc]++;
                }
            }
            else
            {
                // Node already assigned elsewhere: if this rank owns the element but not the node,
                // the node becomes a halo node for this rank
                if (element_owner == local_proc_index)
                {
                    //if (rNodesOwned.find(element_data.NodeIndices[i]) == rNodesOwned.end())
                    if (global_node_partition[element_data.NodeIndices[i]] != local_proc_index)
                    {
                        rHaloNodesOwned.insert(element_data.NodeIndices[i]);
                    }
                }
            }
        }
    }

    delete[] global_element_partition;

    /*
     *  Refine element distribution. Add extra elements that parMETIS didn't consider initially but
     *  include any node owned by the processor. This ensures that all the system matrix rows are
     *  assembled locally.
     */
    rMeshReader.Reset();

    for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
    {
        ElementData element_data = rMeshReader.GetNextElementData();

        bool element_owned = false;
        std::set<unsigned> temp_halo_nodes;

        for (unsigned i=0; i<ELEMENT_DIM+1; i++)
        {
            if (rNodesOwned.find(element_data.NodeIndices[i]) != rNodesOwned.end())
            {
                element_owned = true;
                rElementsOwned.insert(element_number);
            }
            else
            {
                temp_halo_nodes.insert(element_data.NodeIndices[i]);
            }
        }

        // Only nodes of elements we actually touch become halo nodes
        if (element_owned)
        {
            rHaloNodesOwned.insert(temp_halo_nodes.begin(), temp_halo_nodes.end());
        }
    }

    rMeshReader.Reset();

    /*
     *  Once we know the offsets we can compute the permutation vector
     */
    // Nodes of each partition get consecutive new indices starting at that partition's offset
    std::vector<unsigned> local_index(PetscTools::GetNumProcs(), 0);

    this->mNodesPermutation.resize(this->GetNumNodes());

    for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
    {
        unsigned partition = global_node_partition[node_index];
        assert(partition!=UNASSIGNED_NODE);

        this->mNodesPermutation[node_index] = rProcessorsOffset[partition] + local_index[partition];

        local_index[partition]++;
    }

}
01852 
01853 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
01854 ChasteCuboid<SPACE_DIM> DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateBoundingBox() const
01855 {
01856     ChastePoint<SPACE_DIM> my_minimum_point;
01857     ChastePoint<SPACE_DIM> my_maximum_point;
01858 
01859     try
01860     {
01861         ChasteCuboid<SPACE_DIM> my_box=AbstractMesh<ELEMENT_DIM, SPACE_DIM>::CalculateBoundingBox();
01862         my_minimum_point=my_box.rGetLowerCorner();
01863         my_maximum_point=my_box.rGetUpperCorner();
01864     }
01865     catch (Exception& e)
01866     {
01867 #define COVERAGE_IGNORE
01868         PetscTools::ReplicateException(true);
01869         throw e;
01870 #undef COVERAGE_IGNORE
01871     }
01872 
01873     PetscTools::ReplicateException(false);
01874     
01875     c_vector<double, SPACE_DIM> global_minimum_point;
01876     c_vector<double, SPACE_DIM> global_maximum_point;
01877     MPI_Allreduce(&my_minimum_point.rGetLocation()[0], &global_minimum_point[0], SPACE_DIM, MPI_DOUBLE, MPI_MIN, PETSC_COMM_WORLD);
01878     MPI_Allreduce(&my_maximum_point.rGetLocation()[0], &global_maximum_point[0], SPACE_DIM, MPI_DOUBLE, MPI_MAX, PETSC_COMM_WORLD);
01879 
01880     ChastePoint<SPACE_DIM> min(global_minimum_point);
01881     ChastePoint<SPACE_DIM> max(global_maximum_point);
01882 
01883     return ChasteCuboid<SPACE_DIM>(min, max);
01884 }
01885 
01886 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
01887 typename DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::HaloNodeIterator DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetHaloNodeIteratorBegin() const
01888 {
01889     return mHaloNodes.begin();
01890 }
01891 
01892 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
01893 typename DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::HaloNodeIterator DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetHaloNodeIteratorEnd() const
01894 {
01895     return mHaloNodes.end();
01896 }
01897 
// Explicit instantiation

// Instantiate the template for every supported (element, space) dimension pair
template class DistributedTetrahedralMesh<1,1>;
template class DistributedTetrahedralMesh<1,2>;
template class DistributedTetrahedralMesh<1,3>;
template class DistributedTetrahedralMesh<2,2>;
template class DistributedTetrahedralMesh<2,3>;
template class DistributedTetrahedralMesh<3,3>;


// Serialization for Boost >= 1.36
#include "SerializationExportWrapperForCpp.hpp"
EXPORT_TEMPLATE_CLASS_ALL_DIMS(DistributedTetrahedralMesh)

Generated on Mon Apr 18 11:35:35 2011 for Chaste by  doxygen 1.5.5