Chaste Commit::f2ff7ee04e70ac9d06c57344df8d017dbb12b97b
DistributedTetrahedralMesh.cpp
1/*
2
3Copyright (c) 2005-2024, University of Oxford.
4All rights reserved.
5
6University of Oxford means the Chancellor, Masters and Scholars of the
7University of Oxford, having an administrative office at Wellington
8Square, Oxford OX1 2JD, UK.
9
10This file is part of Chaste.
11
12Redistribution and use in source and binary forms, with or without
13modification, are permitted provided that the following conditions are met:
14 * Redistributions of source code must retain the above copyright notice,
15 this list of conditions and the following disclaimer.
16 * Redistributions in binary form must reproduce the above copyright notice,
17 this list of conditions and the following disclaimer in the documentation
18 and/or other materials provided with the distribution.
19 * Neither the name of the University of Oxford nor the names of its
20 contributors may be used to endorse or promote products derived from this
21 software without specific prior written permission.
22
23THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
29GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33
34*/
35
36#include "DistributedTetrahedralMesh.hpp"
37
38#include <cassert>
39#include <sstream>
40#include <string>
41#include <iterator>
42#include <algorithm>
43#include <boost/scoped_array.hpp>
44
45#include "Exception.hpp"
46#include "Element.hpp"
47#include "BoundaryElement.hpp"
48
49#include "PetscTools.hpp"
50#include "DistributedVectorFactory.hpp"
51#include "OutputFileHandler.hpp"
52#include "NodePartitioner.hpp"
53
54#include "RandomNumberGenerator.hpp"
55
56#include "Timer.hpp"
57#include "TetrahedralMesh.hpp"
58#include "Warnings.hpp"
59
60#include "petscao.h"
61#include <parmetis.h>
62#if (PARMETIS_MAJOR_VERSION >= 4) //ParMETIS 4.x and above
63//Redefine the index type so that we can still use the old name "idxtype"
64#define idxtype idx_t
65#else
66//Old version of ParMETIS used "float" which may appear elsewhere in, for example, tetgen
67#define real_t float
68#endif
69
71// IMPLEMENTATION
73
74template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
75DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::DistributedTetrahedralMesh(DistributedTetrahedralMeshPartitionType::type partitioningMethod)
76 :
77 mTotalNumElements(0u),
78 mTotalNumBoundaryElements(0u),
79 mTotalNumNodes(0u),
80 mpSpaceRegion(nullptr),
81 mPartitioning(partitioningMethod)
82{
83 if (ELEMENT_DIM == 1 && (partitioningMethod != DistributedTetrahedralMeshPartitionType::GEOMETRIC))
84 {
85 //No METIS partition is possible - revert to DUMB
86 mPartitioning = DistributedTetrahedralMeshPartitionType::DUMB;
87 }
88}
89
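// === Editor's note: illustrative usage sketch, not part of the original Chaste source. ===
// It shows how a caller might pick a partitioning method and build the mesh from a reader;
// the mesh base name below is hypothetical.
#if 0 // example only, excluded from compilation
#include "TrianglesMeshReader.hpp"
#include "DistributedTetrahedralMesh.hpp"

void ExampleConstructDistributedMesh()
{
    TrianglesMeshReader<3,3> mesh_reader("mesh/test/data/some_mesh"); // hypothetical path
    // PARMETIS_LIBRARY is the default; a 1D mesh would silently fall back to DUMB (see constructor above)
    DistributedTetrahedralMesh<3,3> mesh(DistributedTetrahedralMeshPartitionType::PARMETIS_LIBRARY);
    mesh.ConstructFromMeshReader(mesh_reader);

    // Each process now holds only its own elements, their nodes, and a halo of neighbouring nodes
    unsigned local_nodes  = mesh.GetNumLocalNodes();
    unsigned global_nodes = mesh.GetNumNodes();
    (void) local_nodes; (void) global_nodes;
}
#endif
// === End of editor's sketch. ===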
90template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
91DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::~DistributedTetrahedralMesh()
92{
93 for (unsigned i=0; i<this->mHaloNodes.size(); i++)
94 {
95 delete this->mHaloNodes[i];
96 }
97}
98
99template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
100void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SetDistributedVectorFactory(DistributedVectorFactory* pFactory)
101{
102 AbstractMesh<ELEMENT_DIM, SPACE_DIM>::SetDistributedVectorFactory(pFactory);
103 mPartitioning = DistributedTetrahedralMeshPartitionType::DUMB;
104}
105
106template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
107void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ComputeMeshPartitioning(
108 AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
109 std::set<unsigned>& rNodesOwned,
110 std::set<unsigned>& rHaloNodesOwned,
111 std::set<unsigned>& rElementsOwned,
112 std::vector<unsigned>& rProcessorsOffset)
113{
114 if (mPartitioning == DistributedTetrahedralMeshPartitionType::METIS_LIBRARY)
115 {
116 WARNING("METIS partitioning is deprecated. Switching to parMETIS");
117 mPartitioning = DistributedTetrahedralMeshPartitionType::PARMETIS_LIBRARY;
118 }
119 if (mPartitioning == DistributedTetrahedralMeshPartitionType::PETSC_MAT_PARTITION && !PetscTools::HasParMetis())
120 {
121 // The following warning can only be reproduced on machines which do not have the PETSc/parMETIS interface.
122// LCOV_EXCL_START
123 WARNING("PETSc/parMETIS partitioning requires PETSc to be configured with parMETIS as an option. Current install has PETSc and parMETIS installed independently. Switching to parMETIS");
124 mPartitioning = DistributedTetrahedralMeshPartitionType::PARMETIS_LIBRARY;
125// LCOV_EXCL_STOP
126 }
128 if (mPartitioning==DistributedTetrahedralMeshPartitionType::PARMETIS_LIBRARY && PetscTools::IsParallel())
129 {
130 /*
131 * With ParMetisLibraryNodeAndElementPartitioning we compute the element partition first
132 * and then we work out the node ownership.
133 */
134 ParMetisLibraryNodeAndElementPartitioning(rMeshReader, rElementsOwned, rNodesOwned, rHaloNodesOwned, rProcessorsOffset);
135 }
136 else
137 {
138 /*
139 * Otherwise we compute the node partition and then we work out element distribution
140 */
141 if (mPartitioning==DistributedTetrahedralMeshPartitionType::PETSC_MAT_PARTITION && PetscTools::IsParallel())
142 {
143 NodePartitioner<ELEMENT_DIM, SPACE_DIM>::PetscMatrixPartitioning(rMeshReader, this->mNodePermutation, rNodesOwned, rProcessorsOffset);
144 }
145 else if (mPartitioning==DistributedTetrahedralMeshPartitionType::GEOMETRIC && PetscTools::IsParallel())
146 {
147 if (!mpSpaceRegion)
148 {
149 EXCEPTION("Using GEOMETRIC partition for DistributedTetrahedralMesh with local regions not set. Call SetProcessRegion(ChasteCuboid)");
150 }
151 NodePartitioner<ELEMENT_DIM, SPACE_DIM>::GeometricPartitioning(rMeshReader, this->mNodePermutation, rNodesOwned, rProcessorsOffset, mpSpaceRegion);
152 }
153 else
154 {
155 NodePartitioner<ELEMENT_DIM, SPACE_DIM>::DumbPartitioning(*this, rNodesOwned);
156 }
157
158 if (rMeshReader.HasNclFile())
159 {
160 // Form a set of all the element indices we are going to own
161 // (union of the sets from the lines in the NCL file)
162 for (std::set<unsigned>::iterator iter = rNodesOwned.begin();
163 iter != rNodesOwned.end();
164 ++iter)
165 {
166 std::vector<unsigned> containing_elements = rMeshReader.GetContainingElementIndices( *iter );
167 rElementsOwned.insert( containing_elements.begin(), containing_elements.end() );
168 }
169
170 // Iterate through that set rather than mTotalNumElements (knowing that we own at least one node in each of these elements)
171 // Then read all the data into a node_index set
172 std::set<unsigned> node_index_set;
174 for (std::set<unsigned>::iterator iter = rElementsOwned.begin();
175 iter != rElementsOwned.end();
176 ++iter)
177 {
178 ElementData element_data = rMeshReader.GetElementData(*iter);
179 node_index_set.insert( element_data.NodeIndices.begin(), element_data.NodeIndices.end() );
180 }
181
182 // Subtract off the rNodesOwned set to produce rHaloNodesOwned.
183 // Note that rNodesOwned is a subset of node_index_set.
184 std::set_difference(node_index_set.begin(), node_index_set.end(),
185 rNodesOwned.begin(), rNodesOwned.end(),
186 std::inserter(rHaloNodesOwned, rHaloNodesOwned.begin()));
187 }
188 else
189 {
190 for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
191 {
192 ElementData element_data = rMeshReader.GetNextElementData();
193
194 bool element_owned = false;
195 std::set<unsigned> temp_halo_nodes;
196
197 for (std::vector<unsigned>::const_iterator it = element_data.NodeIndices.begin();
198 it != element_data.NodeIndices.end();
199 ++it)
200 {
201 if (rNodesOwned.find(*it) != rNodesOwned.end())
202 {
203 element_owned = true;
204 rElementsOwned.insert(element_number);
205 }
206 else
207 {
208 temp_halo_nodes.insert(*it);
209 }
210 }
211
212 if (element_owned)
213 {
214 rHaloNodesOwned.insert(temp_halo_nodes.begin(), temp_halo_nodes.end());
215 }
216 }
217 }
218
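// === Editor's note: minimal sketch, not part of the original Chaste source. ===
// It restates the halo rule applied above: any node of a locally owned element that is not
// itself locally owned becomes a halo node, which in the NCL branch is one std::set_difference.
// The node indices below are made up.
#if 0 // example only, excluded from compilation
#include <algorithm>
#include <iterator>
#include <set>

void ExampleHaloNodeRule()
{
    std::set<unsigned> nodes_owned = {0, 1, 2};                    // made-up owned nodes
    std::set<unsigned> nodes_of_owned_elements = {0, 1, 2, 3, 7};  // all nodes touched by owned elements

    std::set<unsigned> halo_nodes;
    std::set_difference(nodes_of_owned_elements.begin(), nodes_of_owned_elements.end(),
                        nodes_owned.begin(), nodes_owned.end(),
                        std::inserter(halo_nodes, halo_nodes.begin()));
    // halo_nodes == {3, 7}
}
#endif
// === End of editor's sketch. ===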
219 if (mPartitioning==DistributedTetrahedralMeshPartitionType::PETSC_MAT_PARTITION && PetscTools::IsParallel())
220 {
221 PetscTools::Barrier();
222 if (PetscTools::AmMaster())
223 {
224 Timer::PrintAndReset("Element and halo node assignation");
225 }
226 }
227 }
228 rMeshReader.Reset();
229}
230
231template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
232void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructFromMeshReader(
233 AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader)
234{
235 std::set<unsigned> nodes_owned;
236 std::set<unsigned> halo_nodes_owned;
237 std::set<unsigned> elements_owned;
238 std::vector<unsigned> proc_offsets;//(PetscTools::GetNumProcs());
239
240 this->mMeshFileBaseName = rMeshReader.GetMeshFileBaseName();
241 mTotalNumElements = rMeshReader.GetNumElements();
242 mTotalNumBoundaryElements = rMeshReader.GetNumFaces();
243 mTotalNumNodes = rMeshReader.GetNumNodes();
244
245
247 Timer::Reset();
248 ComputeMeshPartitioning(rMeshReader, nodes_owned, halo_nodes_owned, elements_owned, proc_offsets);
250 //Timer::Print("partitioning");
251
252 // Reserve memory
253 this->mElements.reserve(elements_owned.size());
254 this->mNodes.reserve(nodes_owned.size());
255
256 if (rMeshReader.IsFileFormatBinary())
257 {
260 std::vector<double> coords;
261 // Binary : load only the nodes which are needed
262 for (typename AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>::NodeIterator node_it = rMeshReader.GetNodeIteratorBegin(nodes_owned);
263 node_it != rMeshReader.GetNodeIteratorEnd();
264 ++node_it)
265 {
266 // Loop over wholly-owned nodes
267 unsigned global_node_index = node_it.GetIndex();
268 coords = *node_it;
269 RegisterNode(global_node_index);
270 Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, coords, false);
271
272// Node attributes in binary format are not yet supported, see #1730
273// for (unsigned i = 0; i < rMeshReader.GetNodeAttributes().size(); i++)
274// {
275// double attribute = rMeshReader.GetNodeAttributes()[i];
276// p_node->AddNodeAttribute(attribute);
277// }
278
279 this->mNodes.push_back(p_node);
280 }
281 for (typename AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>::NodeIterator node_it = rMeshReader.GetNodeIteratorBegin(halo_nodes_owned);
282 node_it != rMeshReader.GetNodeIteratorEnd();
283 ++node_it)
284 {
285 // Loop over halo-owned nodes
286 unsigned global_node_index = node_it.GetIndex();
287 coords = *node_it;
288 RegisterHaloNode(global_node_index);
289 mHaloNodes.push_back(new Node<SPACE_DIM>(global_node_index, coords, false));
290 }
291 }
292 else
293 {
294 // Ascii : Sequentially load all the nodes and store those owned (or halo-owned) by the process
296 for (unsigned node_index=0; node_index < mTotalNumNodes; node_index++)
297 {
298 std::vector<double> coords;
300 coords = rMeshReader.GetNextNode();
301
302 // The node is owned by the processor
303 if (nodes_owned.find(node_index) != nodes_owned.end())
304 {
305 RegisterNode(node_index);
306 Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(node_index, coords, false);
307
308 for (unsigned i = 0; i < rMeshReader.GetNodeAttributes().size(); i++)
309 {
310 double attribute = rMeshReader.GetNodeAttributes()[i];
311 p_node->AddNodeAttribute(attribute);
312 }
313
314 this->mNodes.push_back(p_node);
315 }
316
317 // The node is a halo node in this processor
318 if (halo_nodes_owned.find(node_index) != halo_nodes_owned.end())
319 {
320 RegisterHaloNode(node_index);
321 mHaloNodes.push_back(new Node<SPACE_DIM>(node_index, coords, false));
323 }
324 }
325
326 for (typename AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>::ElementIterator elem_it
327 = rMeshReader.GetElementIteratorBegin(elements_owned);
328 elem_it != rMeshReader.GetElementIteratorEnd();
329 ++elem_it)
330 {
331 ElementData element_data = *elem_it;
332 unsigned global_element_index = elem_it.GetIndex();
333
334 std::vector<Node<SPACE_DIM>*> nodes;
335 for (unsigned j=0; j<ELEMENT_DIM+1; j++)
336 {
337 // Because we have populated mNodes and mHaloNodes above, we can now use this method, which should never throw
338 nodes.push_back(this->GetNodeOrHaloNode(element_data.NodeIndices[j]));
339 }
341 RegisterElement(global_element_index);
342 Element<ELEMENT_DIM,SPACE_DIM>* p_element = new Element<ELEMENT_DIM,SPACE_DIM>(global_element_index, nodes);
343 this->mElements.push_back(p_element);
344
345 if (rMeshReader.GetNumElementAttributes() > 0)
346 {
347 assert(rMeshReader.GetNumElementAttributes() == 1);
348 double attribute_value = element_data.AttributeValue;
349 p_element->SetAttribute(attribute_value);
350 }
351 }
352
353 // Boundary nodes and elements
354 try
355 {
356 for (unsigned face_index=0; face_index<mTotalNumBoundaryElements; face_index++)
357 {
358 ElementData face_data = rMeshReader.GetNextFaceData();
359 std::vector<unsigned> node_indices = face_data.NodeIndices;
360
361 bool own = false;
362
363 for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
364 {
365 // if I own this node
366 if (mNodesMapping.find(node_indices[node_index]) != mNodesMapping.end())
367 {
368 own = true;
369 break;
370 }
371 }
372
373 if (!own)
374 {
375 continue;
376 }
377
378 // Determine if this is a boundary face
379 //std::set<unsigned> containing_element_indices; // Elements that contain this face
380 std::vector<Node<SPACE_DIM>*> nodes;
382 for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
383 {
384 //because we have populated mNodes and mHaloNodes above, we can now use this method,
385 //which SHOULD never throw (but it does).
386 try
387 {
388 nodes.push_back(this->GetNodeOrHaloNode(node_indices[node_index]));
389 }
390 catch (Exception &)
391 {
392 EXCEPTION("Face does not appear in element file (Face " << face_index << " in "<<this->mMeshFileBaseName<< ")");
393 }
394 }
396 // This is a boundary face
397 // Ensure all its nodes are marked as boundary nodes
398 for (unsigned j=0; j<nodes.size(); j++)
399 {
400 if (!nodes[j]->IsBoundaryNode())
401 {
402 nodes[j]->SetAsBoundaryNode();
403 this->mBoundaryNodes.push_back(nodes[j]);
404 }
405 // Register the index that this boundary element will have with the node
406 nodes[j]->AddBoundaryElement(face_index);
407 }
408
409 RegisterBoundaryElement(face_index);
410 BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>* p_boundary_element = new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(face_index, nodes);
411 this->mBoundaryElements.push_back(p_boundary_element);
412
413 if (rMeshReader.GetNumFaceAttributes() > 0)
414 {
415 assert(rMeshReader.GetNumFaceAttributes() == 1);
416 p_boundary_element->SetAttribute(face_data.AttributeValue);
417 }
418 }
419 }
420 catch (Exception &e)
421 {
422 PetscTools::ReplicateException(true); //Bad face exception
423 throw e;
424 }
425 PetscTools::ReplicateException(false);
426
427 if (mPartitioning != DistributedTetrahedralMeshPartitionType::DUMB && PetscTools::IsParallel())
428 {
429 assert(this->mNodePermutation.size() != 0);
430 // If we are partitioning (and permuting) a mesh, we need to be certain that we aren't doing it twice
431 assert(rMeshReader.HasNodePermutation() == false);
432
433 // We reorder so that each process owns a contiguous set of the indices and we can then build a distributed vector factory.
434 ReorderNodes();
435
436 unsigned num_owned;
437 unsigned rank = PetscTools::GetMyRank();
438 if (!PetscTools::AmTopMost())
439 {
440 num_owned = proc_offsets[rank+1]-proc_offsets[rank];
441 }
442 else
443 {
444 num_owned = mTotalNumNodes - proc_offsets[rank];
445 }
446
447 assert(!this->mpDistributedVectorFactory);
448 this->mpDistributedVectorFactory = new DistributedVectorFactory(this->GetNumNodes(), num_owned);
449 }
450 else
451 {
452 // Dumb or sequential partition
453 assert(this->mpDistributedVectorFactory);
455 if (rMeshReader.HasNodePermutation())
456 {
457 // This is probably an unarchiving operation where the original run applied a permutation to the mesh
458 // We need to re-record that the permutation has happened (so that we can archive it correctly later).
459 this->mNodePermutation = rMeshReader.rGetNodePermutation();
460 }
461 }
462 rMeshReader.Reset();
463}
464
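// === Editor's note: minimal sketch, not part of the original Chaste source. ===
// After ReorderNodes() each process owns a contiguous block of global node indices, which is
// exactly what the DistributedVectorFactory built above describes; a local-ownership test is
// then just a range check.
#if 0 // example only, excluded from compilation
#include "DistributedVectorFactory.hpp"

bool ExampleOwnsGlobalNodeIndex(DistributedVectorFactory* pFactory, unsigned globalIndex)
{
    // [GetLow(), GetHigh()) is the half-open range of indices owned by this process
    return (globalIndex >= pFactory->GetLow() && globalIndex < pFactory->GetHigh());
}
#endif
// === End of editor's sketch. ===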
465template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
466unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalNodes() const
467{
468 return this->mNodes.size();
469}
470
471template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
472unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumHaloNodes() const
473{
474 return this->mHaloNodes.size();
475}
476
477template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
478unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalElements() const
479{
480 return this->mElements.size();
481}
482
483template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
484unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumLocalBoundaryElements() const
485{
486 return this->mBoundaryElements.size();
487}
488
489template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
490unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumNodes() const
491{
492 return mTotalNumNodes;
493}
494
495template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
496unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumAllNodes() const
497{
498 return mTotalNumNodes;
499}
500
501template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
502unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumElements() const
503{
504 return mTotalNumElements;
505}
506
507template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
508DistributedTetrahedralMeshPartitionType::type DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetPartitionType() const
509{
510 return mPartitioning;
511}
512
513template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
514unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNumBoundaryElements() const
515{
516 return mTotalNumBoundaryElements;
517}
518
519template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
520void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetHaloNodeIndices(std::vector<unsigned>& rHaloIndices) const
521{
522 //Make sure the output vector is empty
523 rHaloIndices.clear();
524 for (unsigned i=0; i<mHaloNodes.size(); i++)
525 {
526 rHaloIndices.push_back(mHaloNodes[i]->GetIndex());
527 }
528}
529
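// === Editor's note: usage sketch, not part of the original Chaste source. ===
// Collecting the halo node indices held by this process, e.g. when setting up a
// communication pattern between neighbouring processes.
#if 0 // example only, excluded from compilation
#include <cassert>
#include <vector>

void ExampleCollectHaloIndices(const DistributedTetrahedralMesh<3,3>& rMesh)
{
    std::vector<unsigned> halo_indices;
    rMesh.GetHaloNodeIndices(halo_indices);   // clears the vector, then fills it
    assert(halo_indices.size() == rMesh.GetNumHaloNodes());
}
#endif
// === End of editor's sketch. ===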
530template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
531ChasteCuboid<SPACE_DIM>* DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetProcessRegion()
532{
533 if (mpSpaceRegion == nullptr)
534 {
535 EXCEPTION("Trying to get unset mpSpaceRegion");
536 }
537 return mpSpaceRegion;
538}
539
540template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
541void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SetElementOwnerships()
542{
543 // All the local elements are owned by the processor (obviously...)
544 //Does nothing - unlike the non-distributed version
545}
546
547template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
548void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SetProcessRegion(ChasteCuboid<SPACE_DIM>* pRegion)
549{
550 mpSpaceRegion = pRegion;
551}
552
553template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
554bool DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfElement(unsigned elementIndex)
555{
556 try
557 {
558 return(AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfElement(elementIndex));
559 }
560 catch(Exception&) // we don't own the element
561 {
562 return false;
563 }
564}
565
566template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
567bool DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfBoundaryElement(unsigned faceIndex)
568{
569 try
570 {
571 return(AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateDesignatedOwnershipOfBoundaryElement(faceIndex));
572 }
573 catch(Exception&) // we don't own the face
574 {
575 return false;
576 }
577}
578
579template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
580void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterNode(unsigned index)
581{
582 mNodesMapping[index] = this->mNodes.size();
583}
584
585template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
586void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterHaloNode(unsigned index)
587{
588 mHaloNodesMapping[index] = mHaloNodes.size();
589}
590
591template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
592void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterElement(unsigned index)
593{
594 mElementsMapping[index] = this->mElements.size();
595}
596
597template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
598void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::RegisterBoundaryElement(unsigned index)
599{
600 mBoundaryElementsMapping[index] = this->mBoundaryElements.size();
601}
602
603template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
604unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveNodeMapping(unsigned index) const
605{
606 std::map<unsigned, unsigned>::const_iterator node_position = mNodesMapping.find(index);
607
608 if (node_position == mNodesMapping.end())
609 {
610 EXCEPTION("Requested node " << index << " does not belong to processor " << PetscTools::GetMyRank());
611 }
612 return node_position->second;
613}
614
615//template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
616//unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveHaloNodeMapping(unsigned index)
617//{
618// assert(mHaloNodesMapping.find(index) != mHaloNodesMapping.end());
619// return mHaloNodesMapping[index];
620//}
621
622template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
623unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveElementMapping(unsigned index) const
624{
625 std::map<unsigned, unsigned>::const_iterator element_position = mElementsMapping.find(index);
626
627 if (element_position == mElementsMapping.end())
628 {
629 EXCEPTION("Requested element " << index << " does not belong to processor " << PetscTools::GetMyRank());
630 }
631
632 return element_position->second;
633}
634
635template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
636unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveBoundaryElementMapping(unsigned index) const
637{
638 std::map<unsigned, unsigned>::const_iterator boundary_element_position = mBoundaryElementsMapping.find(index);
639
640 if (boundary_element_position == mBoundaryElementsMapping.end())
641 {
642 EXCEPTION("Requested boundary element " << index << " does not belong to processor " << PetscTools::GetMyRank());
643 }
644
645 return boundary_element_position->second;
646}
647
648template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
649Node<SPACE_DIM>* DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNodeOrHaloNode(unsigned index) const
650{
651 std::map<unsigned, unsigned>::const_iterator node_position;
652 // First search the halo (expected to be a smaller map so quicker)
653 if ((node_position=mHaloNodesMapping.find(index)) != mHaloNodesMapping.end())
654 {
655 return mHaloNodes[node_position->second];
656 }
657 // Next search the owned node
658 if ((node_position=mNodesMapping.find(index)) != mNodesMapping.end())
659 {
660 //Found an owned node
661 return this->mNodes[node_position->second];
662 }
663 // Not here
664 EXCEPTION("Requested node/halo " << index << " does not belong to processor " << PetscTools::GetMyRank());
665}
666
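// === Editor's note: minimal sketch, not part of the original Chaste source. ===
// It illustrates why GetNodeOrHaloNode() exists: a locally owned element may reference nodes
// that are owned by a neighbouring process and are only present here as halo copies, so
// element-wise loops should use this accessor rather than GetNode().
#if 0 // example only, excluded from compilation
void ExampleVisitElementNodes(DistributedTetrahedralMesh<3,3>& rMesh, unsigned elementIndex)
{
    Element<3,3>* p_element = rMesh.GetElement(elementIndex); // throws if the element is not local
    for (unsigned j=0; j<4; j++)
    {
        unsigned global_node_index = p_element->GetNodeGlobalIndex(j);
        // Works for owned and halo nodes alike; throws if the node is neither
        Node<3>* p_node = rMesh.GetNodeOrHaloNode(global_node_index);
        (void) p_node;
    }
}
#endif
// === End of editor's sketch. ===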
667template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
668void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ReorderNodes()
669{
670 assert(PetscTools::IsParallel());
671
672 // Need to rebuild global-local maps
673 mNodesMapping.clear();
674 mHaloNodesMapping.clear();
675
676 // Update indices
677 for (unsigned index=0; index<this->mNodes.size(); index++)
678 {
679 unsigned old_index = this->mNodes[index]->GetIndex();
680 unsigned new_index = this->mNodePermutation[old_index];
681
682 this->mNodes[index]->SetIndex(new_index);
683 mNodesMapping[new_index] = index;
684 }
685
686 for (unsigned index=0; index<mHaloNodes.size(); index++)
687 {
688 unsigned old_index = mHaloNodes[index]->GetIndex();
689 unsigned new_index = this->mNodePermutation[old_index];
690
691 mHaloNodes[index]->SetIndex(new_index);
692 mHaloNodesMapping[new_index] = index;
693 }
694}
695
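// === Editor's note: minimal sketch, not part of the original Chaste source. ===
// It spells out the permutation convention used by ReorderNodes(): mNodePermutation is indexed
// by the original (file) global index and stores the new, partition-contiguous global index.
// The toy permutation below is made up.
#if 0 // example only, excluded from compilation
#include <vector>

void ExampleNodePermutation()
{
    std::vector<unsigned> node_permutation = {2, 0, 1}; // old (file) index -> new global index

    unsigned old_index = 0;
    unsigned new_index = node_permutation[old_index];   // == 2
    (void) new_index;
}
#endif
// === End of editor's sketch. ===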
696template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
697void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructLinearMesh(unsigned width)
698{
699 assert(ELEMENT_DIM == 1); // LCOV_EXCL_LINE
700
701 //Check that there are enough nodes to make the parallelisation worthwhile
702 if (width==0)
703 {
704 EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
705 }
706
707 // Hook to pick up when we are using a geometric partition.
708 if (mPartitioning == DistributedTetrahedralMeshPartitionType::GEOMETRIC)
709 {
710 if (!mpSpaceRegion)
711 {
712 EXCEPTION("Space region not set for GEOMETRIC partition of DistributedTetrahedralMesh");
713 }
714
715 // Write a serial file, the load on distributed processors.
717 {
718 TrianglesMeshWriter<ELEMENT_DIM,SPACE_DIM> mesh_writer("", "temp_linear_mesh");
719 TetrahedralMesh<ELEMENT_DIM,SPACE_DIM> base_mesh;
720 base_mesh.ConstructLinearMesh(width);
721 mesh_writer.WriteFilesUsingMesh(base_mesh);
722 }
723 PetscTools::Barrier();
724
725 OutputFileHandler output_handler("", false);
726
727 std::string output_dir = output_handler.GetOutputDirectoryFullPath();
728 TrianglesMeshReader<ELEMENT_DIM,SPACE_DIM> mesh_reader(output_dir+"temp_linear_mesh");
729
730 this->ConstructFromMeshReader(mesh_reader);
731 }
732 else // use a default partition.
733 {
734 //Use dumb partition so that archiving doesn't permute anything
735 mPartitioning=DistributedTetrahedralMeshPartitionType::DUMB;
736 mTotalNumNodes=width+1;
737 mTotalNumBoundaryElements=2u;
738 mTotalNumElements=width;
739
740 //Use DistributedVectorFactory to make a dumb partition of the nodes
741 assert(!this->mpDistributedVectorFactory);
742 this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes);
743 if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
744 {
745 // It's a short mesh and this process owns no nodes.
746 // This return cannot be covered by regular testing, but is covered by the Nightly -np 3 builder
747 return; //LCOV_EXCL_LINE
748 }
749
750 /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
751 * higher numbered process may have dropped out of this construction altogether
752 * (because it has no local ownership)
753 */
754 bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);
755
756 unsigned lo_node=this->mpDistributedVectorFactory->GetLow();
757 unsigned hi_node=this->mpDistributedVectorFactory->GetHigh();
758 if (!PetscTools::AmMaster())
759 {
760 //Allow for a halo node
761 lo_node--;
762 }
763 if (!am_top_most)
764 {
765 //Allow for a halo node
766 hi_node++;
767 }
768 Node<SPACE_DIM>* p_old_node=nullptr;
769 for (unsigned node_index=lo_node; node_index<hi_node; node_index++)
770 {
771 // create node or halo-node
772 Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(node_index, node_index==0 || node_index==width, node_index);
773 if (node_index<this->mpDistributedVectorFactory->GetLow() ||
774 node_index==this->mpDistributedVectorFactory->GetHigh() )
775 {
776 //Beyond left or right it's a halo node
777 RegisterHaloNode(node_index);
778 mHaloNodes.push_back(p_node);
779 }
780 else
781 {
782 RegisterNode(node_index);
783 this->mNodes.push_back(p_node); // create node
784
785 //A boundary face has to be wholly owned by the process
786 //Since, when ELEMENT_DIM>1 we have *at least* one boundary node as a non-halo
787 if (node_index==0) // create left boundary node and boundary element
788 {
789 this->mBoundaryNodes.push_back(p_node);
790 RegisterBoundaryElement(0);
791 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(0, p_node) );
792 }
793 if (node_index==width) // create right boundary node and boundary element
794 {
795 this->mBoundaryNodes.push_back(p_node);
796 RegisterBoundaryElement(1);
797 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(1, p_node) );
798 }
799 }
800 if (node_index>lo_node) // create element
801 {
802 std::vector<Node<SPACE_DIM>*> nodes;
803 nodes.push_back(p_old_node);
804 nodes.push_back(p_node);
805 RegisterElement(node_index-1);
806 this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(node_index-1, nodes) );
807 }
808 //Keep track of the node which we've just created
809 p_old_node=p_node;
810 }
811 }
812}
813
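// === Editor's note: usage sketch, not part of the original Chaste source. ===
// Building the default (DUMB-partitioned) 1D mesh in parallel: every process gets a contiguous
// run of nodes plus one halo node at each internal partition boundary, while the global counts
// below are identical on all processes.
#if 0 // example only, excluded from compilation
#include <cassert>

void ExampleLinearMesh()
{
    DistributedTetrahedralMesh<1,1> mesh;  // 1D construction falls back to a DUMB partition
    mesh.ConstructLinearMesh(10);          // nodes at x = 0, 1, ..., 10

    assert(mesh.GetNumNodes() == 11u);
    assert(mesh.GetNumElements() == 10u);
    assert(mesh.GetNumBoundaryElements() == 2u);
}
#endif
// === End of editor's sketch. ===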
814template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
815void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructRectangularMesh(unsigned width, unsigned height, bool stagger)
816{
817 assert(SPACE_DIM == 2); // LCOV_EXCL_LINE
818 assert(ELEMENT_DIM == 2); // LCOV_EXCL_LINE
819 //Check that there are enough nodes to make the parallelisation worthwhile
820 if (height==0)
821 {
822 EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
823 }
824
825 // Hook to pick up when we are using a geometric partition.
826 if (mPartitioning == DistributedTetrahedralMeshPartitionType::GEOMETRIC)
827 {
828 if (!mpSpaceRegion)
829 {
830 EXCEPTION("Space region not set for GEOMETRIC partition of DistributedTetrahedralMesh");
831 }
832
833 // Write a serial file, the load on distributed processors.
835 {
836 TrianglesMeshWriter<ELEMENT_DIM,SPACE_DIM> mesh_writer("", "temp_rectangular_mesh");
837 TetrahedralMesh<ELEMENT_DIM,SPACE_DIM> base_mesh;
838 base_mesh.ConstructRectangularMesh(width, height);
839 mesh_writer.WriteFilesUsingMesh(base_mesh);
840 }
841 PetscTools::Barrier();
842
843 OutputFileHandler output_handler("", false);
844
845 std::string output_dir = output_handler.GetOutputDirectoryFullPath();
846 TrianglesMeshReader<ELEMENT_DIM,SPACE_DIM> mesh_reader(output_dir+"temp_rectangular_mesh");
847
848 this->ConstructFromMeshReader(mesh_reader);
849 }
850 else
851 {
852 //Use dumb partition so that archiving doesn't permute anything
853 mPartitioning=DistributedTetrahedralMeshPartitionType::DUMB;
854
855 mTotalNumNodes=(width+1)*(height+1);
856 mTotalNumBoundaryElements=(width+height)*2;
857 mTotalNumElements=width*height*2;
858
859 //Use DistributedVectorFactory to make a dumb partition of space
860 DistributedVectorFactory y_partition(height+1);
861 unsigned lo_y = y_partition.GetLow();
862 unsigned hi_y = y_partition.GetHigh();
863 //Dumb partition of nodes has to be such that each process gets complete slices
864 assert(!this->mpDistributedVectorFactory);
865 this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes, (width+1)*y_partition.GetLocalOwnership());
866 if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
867 {
868 // It's a short mesh and this process owns no nodes.
869 // This return cannot be covered by regular testing, but is covered by the Nightly -np 3 builder
870 return; //LCOV_EXCL_LINE
871 }
872
873 /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
874 * higher numbered process may have dropped out of this construction altogether
875 * (because it has no local ownership)
876 */
877 bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);
878
879
880 if (!PetscTools::AmMaster())
881 {
882 //Allow for a halo node
883 lo_y--;
884 }
885 if (!am_top_most)
886 {
887 //Allow for a halo node
888 hi_y++;
889 }
890
891 //Construct the nodes
892 for (unsigned j=lo_y; j<hi_y; j++)
893 {
894 for (unsigned i=0; i<width+1; i++)
895 {
896 bool is_boundary=false;
897 if (i==0 || j==0 || i==width || j==height)
898 {
899 is_boundary=true;
900 }
901 unsigned global_node_index=((width+1)*(j) + i); //Verified from sequential
902 Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, is_boundary, i, j);
903 if (j<y_partition.GetLow() || j==y_partition.GetHigh() )
904 {
905 //Beyond left or right it's a halo node
906 RegisterHaloNode(global_node_index);
907 mHaloNodes.push_back(p_node);
908 }
909 else
910 {
911 RegisterNode(global_node_index);
912 this->mNodes.push_back(p_node);
913 }
914 if (is_boundary)
915 {
916 this->mBoundaryNodes.push_back(p_node);
917 }
918 }
919 }
920
921 //Construct the boundary elements
922 unsigned belem_index;
923 //Top
924 if (am_top_most)
925 {
926 for (unsigned i=0; i<width; i++)
927 {
928 std::vector<Node<SPACE_DIM>*> nodes;
929 nodes.push_back(GetNodeOrHaloNode( height*(width+1)+i+1 ));
930 nodes.push_back(GetNodeOrHaloNode( height*(width+1)+i ));
931 belem_index=i;
932 RegisterBoundaryElement(belem_index);
933 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
934 }
935 }
936
937 //Right
938 for (unsigned j=lo_y+1; j<hi_y; j++)
939 {
940 std::vector<Node<SPACE_DIM>*> nodes;
941 nodes.push_back(GetNodeOrHaloNode( (width+1)*j-1 ));
942 nodes.push_back(GetNodeOrHaloNode( (width+1)*(j+1)-1 ));
943 belem_index=width+j-1;
944 RegisterBoundaryElement(belem_index);
945 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
946 }
947
948 //Bottom
949 if (PetscTools::AmMaster())
950 {
951 for (unsigned i=0; i<width; i++)
952 {
953 std::vector<Node<SPACE_DIM>*> nodes;
954 nodes.push_back(GetNodeOrHaloNode( i ));
955 nodes.push_back(GetNodeOrHaloNode( i+1 ));
956 belem_index=width+height+i;
957 RegisterBoundaryElement(belem_index);
958 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
959 }
960 }
961
962 //Left
963 for (unsigned j=lo_y; j<hi_y-1; j++)
964 {
965 std::vector<Node<SPACE_DIM>*> nodes;
966 nodes.push_back(GetNodeOrHaloNode( (width+1)*(j+1) ));
967 nodes.push_back(GetNodeOrHaloNode( (width+1)*(j) ));
968 belem_index=2*width+height+j;
969 RegisterBoundaryElement(belem_index);
970 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
971 }
972
973
974 //Construct the elements
975 unsigned elem_index;
976 for (unsigned j=lo_y; j<hi_y-1; j++)
977 {
978 for (unsigned i=0; i<width; i++)
979 {
980 unsigned parity=(i+(height-j))%2;//Note that parity is measured from the top-left (not bottom left) for historical reasons
981 unsigned nw=(j+1)*(width+1)+i; //ne=nw+1
982 unsigned sw=(j)*(width+1)+i; //se=sw+1
983 std::vector<Node<SPACE_DIM>*> upper_nodes;
984 upper_nodes.push_back(GetNodeOrHaloNode( nw ));
985 upper_nodes.push_back(GetNodeOrHaloNode( nw+1 ));
986 if (stagger==false || parity == 1)
987 {
988 upper_nodes.push_back(GetNodeOrHaloNode( sw+1 ));
989 }
990 else
991 {
992 upper_nodes.push_back(GetNodeOrHaloNode( sw ));
993 }
994 elem_index=2*(j*width+i);
995 RegisterElement(elem_index);
996 this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index,upper_nodes));
997 std::vector<Node<SPACE_DIM>*> lower_nodes;
998 lower_nodes.push_back(GetNodeOrHaloNode( sw+1 ));
999 lower_nodes.push_back(GetNodeOrHaloNode( sw ));
1000 if (stagger==false ||parity == 1)
1001 {
1002 lower_nodes.push_back(GetNodeOrHaloNode( nw ));
1003 }
1004 else
1005 {
1006 lower_nodes.push_back(GetNodeOrHaloNode( nw+1 ));
1007 }
1008 elem_index++;
1009 RegisterElement(elem_index);
1010 this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index,lower_nodes));
1011 }
1012 }
1013 }
1014}
1015
1016
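// === Editor's note: minimal sketch, not part of the original Chaste source. ===
// It restates the node numbering used below: nodes are numbered row by row, so
// global_node_index = (width+1)*j + i, and the dumb partition hands each process whole rows.
#if 0 // example only, excluded from compilation
unsigned ExampleRectangularNodeIndex(unsigned i, unsigned j, unsigned width)
{
    // e.g. width = 4: node (i=2, j=3) has global index 5*3 + 2 = 17
    return (width+1)*j + i;
}
#endif
// === End of editor's sketch. ===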
1017template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1018void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructCuboid(unsigned width,
1019 unsigned height,
1020 unsigned depth)
1021{
1022 assert(SPACE_DIM == 3); // LCOV_EXCL_LINE
1023 assert(ELEMENT_DIM == 3); // LCOV_EXCL_LINE
1024 //Check that there are enough nodes to make the parallelisation worthwhile
1025 if (depth==0)
1026 {
1027 EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
1028 }
1029
1030 // Hook to pick up when we are using a geometric partition.
1031 if (mPartitioning == DistributedTetrahedralMeshPartitionType::GEOMETRIC)
1032 {
1033 if (!mpSpaceRegion)
1034 {
1035 EXCEPTION("Space region not set for GEOMETRIC partition of DistributedTetrahedralMesh");
1036 }
1037
1038 // Write a serial file, the load on distributed processors.
1040 {
1041 TrianglesMeshWriter<ELEMENT_DIM,SPACE_DIM> mesh_writer("", "temp_cuboid_mesh");
1042 TetrahedralMesh<ELEMENT_DIM,SPACE_DIM> base_mesh;
1043 base_mesh.ConstructCuboid(width, height, depth);
1044 mesh_writer.WriteFilesUsingMesh(base_mesh);
1045 }
1046 PetscTools::Barrier();
1047
1048 OutputFileHandler output_handler("", false);
1049
1050 std::string output_dir = output_handler.GetOutputDirectoryFullPath();
1051 TrianglesMeshReader<ELEMENT_DIM,SPACE_DIM> mesh_reader(output_dir+"temp_cuboid_mesh");
1052
1053 this->ConstructFromMeshReader(mesh_reader);
1054 }
1055 else
1056 {
1057 //Use dumb partition so that archiving doesn't permute anything
1058 mPartitioning=DistributedTetrahedralMeshPartitionType::DUMB;
1059
1060 mTotalNumNodes=(width+1)*(height+1)*(depth+1);
1061 mTotalNumBoundaryElements=((width*height)+(width*depth)+(height*depth))*4;//*2 for top-bottom, *2 for tessellating each unit square
1062 mTotalNumElements=width*height*depth*6;
1063
1064 //Use DistributedVectorFactory to make a dumb partition of space
1065 DistributedVectorFactory z_partition(depth+1);
1066 unsigned lo_z = z_partition.GetLow();
1067 unsigned hi_z = z_partition.GetHigh();
1068
1069 //Dumb partition of nodes has to be such that each process gets complete slices
1070 assert(!this->mpDistributedVectorFactory);
1071 this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes, (width+1)*(height+1)*z_partition.GetLocalOwnership());
1072 if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
1073 {
1074 // It's a short mesh and this process owns no nodes.
1075 // This return cannot be covered by regular testing, but is covered by the Nightly -np 3 builder
1076 return; //LCOV_EXCL_LINE
1077 }
1078
1079 /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
1080 * higher numbered process may have dropped out of this construction altogether
1081 * (because is has no local ownership)
1082 */
1083 bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);
1084
1085 if (!PetscTools::AmMaster())
1086 {
1087 //Allow for a halo node
1088 lo_z--;
1089 }
1090 if (!am_top_most)
1091 {
1092 //Allow for a halo node
1093 hi_z++;
1094 }
1095
1096 //Construct the nodes
1097 unsigned global_node_index;
1098 for (unsigned k=lo_z; k<hi_z; k++)
1099 {
1100 for (unsigned j=0; j<height+1; j++)
1101 {
1102 for (unsigned i=0; i<width+1; i++)
1103 {
1104 bool is_boundary = false;
1105 if (i==0 || j==0 || k==0 || i==width || j==height || k==depth)
1106 {
1107 is_boundary = true;
1108 }
1109 global_node_index = (k*(height+1)+j)*(width+1)+i;
1110
1111 Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, is_boundary, i, j, k);
1112
1113 if (k<z_partition.GetLow() || k==z_partition.GetHigh() )
1114 {
1115 //Beyond left or right it's a halo node
1116 RegisterHaloNode(global_node_index);
1117 mHaloNodes.push_back(p_node);
1118 }
1119 else
1120 {
1121 RegisterNode(global_node_index);
1122 this->mNodes.push_back(p_node);
1123 }
1124
1125 if (is_boundary)
1126 {
1127 this->mBoundaryNodes.push_back(p_node);
1128 }
1129 }
1130 }
1131 }
1132
1133 // Construct the elements
1134
1135 unsigned element_nodes[6][4] = {{0, 1, 5, 7}, {0, 1, 3, 7},
1136 {0, 2, 3, 7}, {0, 2, 6, 7},
1137 {0, 4, 6, 7}, {0, 4, 5, 7}};
1138 std::vector<Node<SPACE_DIM>*> tetrahedra_nodes;
1139
1140 for (unsigned k=lo_z; k<hi_z-1; k++)
1141 {
1142 unsigned belem_index = 0;
1143 if (k != 0)
1144 {
1145 // height*width squares on upper face, k layers of 2*height+2*width squares around the sides
1146 belem_index = 2*(height*width+k*2*(height+width));
1147 }
1148
1149 for (unsigned j=0; j<height; j++)
1150 {
1151 for (unsigned i=0; i<width; i++)
1152 {
1153 // Compute the nodes' index
1154 unsigned global_node_indices[8];
1155 unsigned local_node_index = 0;
1156
1157 for (unsigned z = 0; z < 2; z++)
1158 {
1159 for (unsigned y = 0; y < 2; y++)
1160 {
1161 for (unsigned x = 0; x < 2; x++)
1162 {
1163 global_node_indices[local_node_index] = i+x+(width+1)*(j+y+(height+1)*(k+z));
1164
1165 local_node_index++;
1166 }
1167 }
1168 }
1169
1170 for (unsigned m = 0; m < 6; m++)
1171 {
1172 // Tetrahedra #m
1173
1174 tetrahedra_nodes.clear();
1175
1176 for (unsigned n = 0; n < 4; n++)
1177 {
1178 tetrahedra_nodes.push_back(GetNodeOrHaloNode( global_node_indices[element_nodes[m][n]] ));
1179 }
1180 unsigned elem_index = 6 * ((k*height+j)*width+i)+m;
1181 RegisterElement(elem_index);
1182 this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index, tetrahedra_nodes));
1183 }
1184
1185 //Are we at a boundary?
1186 std::vector<Node<SPACE_DIM>*> triangle_nodes;
1187
1188 if (i == 0) //low face at x==0
1189 {
1190 triangle_nodes.clear();
1191 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
1192 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
1193 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
1194 RegisterBoundaryElement(belem_index);
1195 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1196 triangle_nodes.clear();
1197 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
1198 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
1199 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
1200 RegisterBoundaryElement(belem_index);
1201 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1202 }
1203 if (i == width-1) //high face at x=width
1204 {
1205 triangle_nodes.clear();
1206 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
1207 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
1208 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
1209 RegisterBoundaryElement(belem_index);
1210 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1211 triangle_nodes.clear();
1212 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
1213 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
1214 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
1215 RegisterBoundaryElement(belem_index);
1216 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1217 }
1218 if (j == 0) //low face at y==0
1219 {
1220 triangle_nodes.clear();
1221 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
1222 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
1223 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
1224 RegisterBoundaryElement(belem_index);
1225 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1226 triangle_nodes.clear();
1227 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
1228 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
1229 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
1230 RegisterBoundaryElement(belem_index);
1231 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1232 }
1233 if (j == height-1) //high face at y=height
1234 {
1235 triangle_nodes.clear();
1236 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
1237 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
1238 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
1239 RegisterBoundaryElement(belem_index);
1240 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1241 triangle_nodes.clear();
1242 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
1243 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
1244 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
1245 RegisterBoundaryElement(belem_index);
1246 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1247 }
1248 if (k == 0) //low face at z==0
1249 {
1250 triangle_nodes.clear();
1251 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
1252 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
1253 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
1254 RegisterBoundaryElement(belem_index);
1255 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1256 triangle_nodes.clear();
1257 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
1258 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
1259 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
1260 RegisterBoundaryElement(belem_index);
1261 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1262 }
1263 if (k == depth-1) //high face at z=depth
1264 {
1265 triangle_nodes.clear();
1266 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
1267 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
1268 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
1269 RegisterBoundaryElement(belem_index);
1270 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1271 triangle_nodes.clear();
1272 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
1273 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
1274 triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
1275 RegisterBoundaryElement(belem_index);
1276 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1277 }
1278 }//i
1279 }//j
1280 }//k
1281 }
1282}
1283
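// === Editor's note: minimal sketch, not part of the original Chaste source. ===
// It checks the bookkeeping used above: each unit cube is split into 6 tetrahedra, and each of
// the 6 cuboid faces contributes 2 boundary triangles per unit square.
#if 0 // example only, excluded from compilation
void ExampleCuboidCounts(unsigned width, unsigned height, unsigned depth)
{
    unsigned num_nodes    = (width+1)*(height+1)*(depth+1);
    unsigned num_elements = width*height*depth*6;
    unsigned num_faces    = ((width*height) + (width*depth) + (height*depth)) * 4;

    // e.g. the unit cube (1,1,1): 8 nodes, 6 tetrahedra, 12 boundary triangles
    (void) num_nodes; (void) num_elements; (void) num_faces;
}
#endif
// === End of editor's sketch. ===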
1284template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1285void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Scale(const double xFactor, const double yFactor, const double zFactor)
1286{
1287 //Base class scale (scales node positions)
1288 AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Scale(xFactor, yFactor, zFactor);
1289 //Scales halos
1290 for (unsigned i=0; i<mHaloNodes.size(); i++)
1291 {
1292 c_vector<double, SPACE_DIM>& r_location = mHaloNodes[i]->rGetModifiableLocation();
1293 if (SPACE_DIM>=3)
1294 {
1295 r_location[2] *= zFactor;
1296 }
1297 if (SPACE_DIM>=2)
1298 {
1299 r_location[1] *= yFactor;
1300 }
1301 r_location[0] *= xFactor;
1302 }
1303
1304}
1305
1306
1307template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1308void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ParMetisLibraryNodeAndElementPartitioning(
1309 AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader,
1310 std::set<unsigned>& rElementsOwned,
1311 std::set<unsigned>& rNodesOwned,
1312 std::set<unsigned>& rHaloNodesOwned,
1313 std::vector<unsigned>& rProcessorsOffset)
1314{
1315 assert(PetscTools::IsParallel());
1316 assert(ELEMENT_DIM==2 || ELEMENT_DIM==3); // LCOV_EXCL_LINE // Metis works with triangles and tetras
1317
1318 const unsigned num_elements = rMeshReader.GetNumElements();
1319 const unsigned num_procs = PetscTools::GetNumProcs();
1320 const unsigned local_proc_index = PetscTools::GetMyRank();
1321
1322 /*
1323 * Work out initial element distribution
1324 */
1325 boost::scoped_array<idxtype> element_distribution(new idxtype[num_procs+1]);
1326 boost::scoped_array<int> element_counts(new int[num_procs]);
1327
1328 element_distribution[0] = 0;
1329
1330 for (unsigned proc_index=1; proc_index<num_procs; proc_index++)
1331 {
1332 element_distribution[proc_index] = element_distribution[proc_index-1] + num_elements/num_procs;
1333 element_counts[proc_index-1] = element_distribution[proc_index] - element_distribution[proc_index-1];
1334 }
1335
1336 element_distribution[num_procs] = num_elements;
1337 element_counts[num_procs-1] = element_distribution[num_procs] - element_distribution[num_procs-1];
1338
1339 /*
1340 * Create distributed mesh data structure
1341 */
1342 idxtype first_local_element = element_distribution[local_proc_index];
1343 idxtype last_plus_one_element = element_distribution[local_proc_index+1];
1344 idxtype num_local_elements = last_plus_one_element - first_local_element;
1345
1346 boost::scoped_array<idxtype> eind(new idxtype[num_local_elements*(ELEMENT_DIM+1)]);
1347 boost::scoped_array<idxtype> eptr(new idxtype[num_local_elements+1]);
1348
1349 if (rMeshReader.IsFileFormatBinary() && first_local_element > 0)
1350 {
1351 // Advance the file pointer to the first element before the ones I own.
1352 rMeshReader.GetElementData(first_local_element - 1);
1353 }
1354 else
1355 {
1356 // Advance the file pointer to the first element before the ones I own.
1357 for (idxtype element_index = 0; element_index < first_local_element; element_index++)
1358 {
1359 rMeshReader.GetNextElementData();
1360 }
1361 }
1362
1363 unsigned counter = 0;
1364 for (idxtype element_index = 0; element_index < num_local_elements; element_index++)
1365 {
1366 ElementData element_data;
1367
1368 element_data = rMeshReader.GetNextElementData();
1369
1370 eptr[element_index] = counter;
1371 for (unsigned i=0; i<ELEMENT_DIM+1; i++)
1372 {
1373 eind[counter++] = element_data.NodeIndices[i];
1374 }
1375 }
1376 eptr[num_local_elements] = counter;
1377
1378 rMeshReader.Reset();
1379
1380 idxtype numflag = 0; // METIS speak for C-style numbering
1381 /* Connectivity degree.
1382 * Specifically, an GRAPH EDGE is placed between any two elements if and only if they share
1383 * at least this many nodes.
1384 *
1385 * Manual recommends "for meshes containing only triangular, tetrahedral,
1386 * hexahedral, or rectangular elements, this parameter can be set to two, three, four, or two, respectively.
1387 */
1388 idxtype ncommonnodes = 3; //Linear tetrahedra
1389 if (ELEMENT_DIM == 2)
1390 {
1391 ncommonnodes = 2;
1392 }
1393
1394 MPI_Comm communicator = PETSC_COMM_WORLD;
1395
1396 idxtype* xadj;
1397 idxtype* adjncy;
1398
1399 Timer::Reset();
1400 ParMETIS_V3_Mesh2Dual(element_distribution.get(), eptr.get(), eind.get(),
1401 &numflag, &ncommonnodes, &xadj, &adjncy, &communicator);
1402 //Timer::Print("ParMETIS Mesh2Dual");
1403
1404 // Be more memory efficient, and get rid of (maybe large) arrays as soon as they're no longer needed, rather than at end of scope
1405 eind.reset();
1406 eptr.reset();
1407
1408 idxtype weight_flag = 0; // unweighted graph
1409 idxtype n_constraints = 1; // number of weights that each vertex has (number of balance constraints)
1410 idxtype n_subdomains = PetscTools::GetNumProcs();
1411 idxtype options[3]; // extra options
1412 options[0] = 0; // ignore extra options
1413 idxtype edgecut;
1414 boost::scoped_array<real_t> tpwgts(new real_t[n_subdomains]);
1415 real_t ubvec_value = (real_t)1.05;
1416 for (unsigned proc=0; proc<PetscTools::GetNumProcs(); proc++)
1417 {
1418 tpwgts[proc] = ((real_t)1.0)/n_subdomains;
1419 }
1420 boost::scoped_array<idxtype> local_partition(new idxtype[num_local_elements]);
1421
1422/*
1423 * In order to use ParMETIS_V3_PartGeomKway, we need to sort out how to compute the coordinates of the
1424 * centers of each element efficiently.
1425 *
1426 * In the meantime use ParMETIS_V3_PartKway.
1427 */
1428// int n_dimensions = ELEMENT_DIM;
1429// float node_coordinates[num_local_elements*SPACE_DIM];
1430//
1431// ParMETIS_V3_PartGeomKway(element_distribution, xadj, adjncy, NULL, NULL, &weight_flag, &numflag,
1432// &n_dimensions, node_coordinates, &n_constraints, &n_subdomains, NULL, NULL,
1433// options, &edgecut, local_partition, &communicator);
1434
1435 Timer::Reset();
1436 ParMETIS_V3_PartKway(element_distribution.get(), xadj, adjncy, nullptr, nullptr, &weight_flag, &numflag,
1437 &n_constraints, &n_subdomains, tpwgts.get(), &ubvec_value,
1438 options, &edgecut, local_partition.get(), &communicator);
1439 //Timer::Print("ParMETIS PartKway");
1440 tpwgts.reset();
1441
1442 boost::scoped_array<idxtype> global_element_partition(new idxtype[num_elements]);
1443
1444 //idxtype is normally int (see metis-4.0/Lib/struct.h 17-22) but is 64bit on Windows
1445 MPI_Datatype mpi_idxtype = MPI_LONG_LONG_INT;
1446 if (sizeof(idxtype) == sizeof(int))
1447 {
1448 mpi_idxtype = MPI_INT;
1449 }
1450 boost::scoped_array<int> int_element_distribution(new int[num_procs+1]);
1451 for (unsigned i=0; i<num_procs+1; ++i)
1452 {
1453 int_element_distribution[i] = element_distribution[i];
1454 }
1455 MPI_Allgatherv(local_partition.get(), num_local_elements, mpi_idxtype,
1456 global_element_partition.get(), element_counts.get(), int_element_distribution.get(), mpi_idxtype, PETSC_COMM_WORLD);
1457
1458 local_partition.reset();
1459
1460 for (unsigned elem_index=0; elem_index<num_elements; elem_index++)
1461 {
1462 if ((unsigned) global_element_partition[elem_index] == local_proc_index)
1463 {
1464 rElementsOwned.insert(elem_index);
1465 }
1466 }
1467
1468 rMeshReader.Reset();
1469 free(xadj);
1470 free(adjncy);
1471
1472 unsigned num_nodes = rMeshReader.GetNumNodes();
1473
1474 // Initialise with no nodes known
1475 std::vector<unsigned> global_node_partition(num_nodes, UNASSIGNED_NODE);
1476
1477 assert(rProcessorsOffset.size() == 0); // Making sure the vector is empty. After calling resize() only newly created memory will be initialised to 0.
1478 rProcessorsOffset.resize(PetscTools::GetNumProcs(), 0);
1479
1480 /*
1481 * Work out node distribution based on initial element distribution returned by ParMETIS
1482 *
1483 * In this loop we handle 4 different data structures:
1484 * global_node_partition and rProcessorsOffset are global,
1485 * rNodesOwned and rHaloNodesOwned are local.
1486 */
1487
1488 /*
1489 * Note that at this point each process has to read the entire element file in order to compute
1490 * the node partition from the initial element distribution.
1491 * * Previously we randomly permuted the BIN file element access order on each process so that the processes
1492 * weren't reading the same file sectors at the same time
1493 * * We noted that with large files (above about 0.5 GigaByte) on HECToR the random access file reader
1494 * was spending all its time in fseekg. This is presumably because each fseekg from the start of the file
1495 * involves multiple levels of indirect file block pointers.
1496 * * Hence the use of random element reading is only useful for the niche of moderately large meshes with
1497 * process counts in the thousands.
1498 * Hence BIN file element permuting is deprecated - we just read the file in order.
1499 * See
1500 * https://chaste.cs.ox.ac.uk/trac/browser/trunk/mesh/src/common/DistributedTetrahedralMesh.cpp?rev=19291#L1459
1501 */
1502
1503 for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
1504 {
1505 unsigned element_owner = global_element_partition[element_number];
1506
1507 ElementData element_data;
1508
1509 element_data = rMeshReader.GetNextElementData();
1510
1511 for (std::vector<unsigned>::const_iterator node_it = element_data.NodeIndices.begin();
1512 node_it != element_data.NodeIndices.end();
1513 ++node_it)
1514 {
1515 /*
1516 * For each node in this element, check whether it hasn't been assigned to another processor yet.
1517 * If so, assign it to the owner of the element. Otherwise, consider it halo.
1518 */
1519 if (global_node_partition[*node_it] == UNASSIGNED_NODE)
1520 {
1521 if (element_owner == local_proc_index)
1522 {
1523 rNodesOwned.insert(*node_it);
1524 }
1525
1526 global_node_partition[*node_it] = element_owner;
1527
1528 // Offset is defined as the first node owned by a processor. We compute it incrementally.
1529 // i.e. if node_index belongs to proc 3 (of 6) we have to shift the processors 4, 5, and 6
1530 // offset a position.
1531 for (unsigned proc=element_owner+1; proc<PetscTools::GetNumProcs(); proc++)
1532 {
1533 rProcessorsOffset[proc]++;
1534 }
1535 }
1536 else
1537 {
1538 if (element_owner == local_proc_index)
1539 {
1540 //if (rNodesOwned.find(*node_it) == rNodesOwned.end())
1541 if (global_node_partition[*node_it] != local_proc_index)
1542 {
1543 rHaloNodesOwned.insert(*node_it);
1544 }
1545 }
1546 }
1547 }
1548 }
1549
1550
1551 /*
1552 * Refine element distribution. Add extra elements that parMETIS didn't consider initially but
1553 * include any node owned by the processor. This ensures that all the system matrix rows are
1554 * assembled locally.
1555 * It may be that some of these elements are in the set of owned nodes erroneously.
1556 * The original set of owned elements (from the k-way partition) informed a
1557 * node partition. It may be that an element near the edge of this new node
1558 * partition may no longer be needed.
1559 *
1560 * Note that rather than clearing the set we could remove elements from the original element partition set
1561 * with erase(), if (!element_owned) below.
1562 */
1563 rElementsOwned.clear();
1564 rMeshReader.Reset();
1565 for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
1566 {
1567 ElementData element_data = rMeshReader.GetNextElementData();
1568
1569 bool element_owned = false;
1570 std::set<unsigned> temp_halo_nodes;
1571
1572 for (std::vector<unsigned>::const_iterator node_it = element_data.NodeIndices.begin();
1573 node_it != element_data.NodeIndices.end();
1574 ++node_it)
1575 {
1576 if (rNodesOwned.find(*node_it) != rNodesOwned.end())
1577 {
1578 element_owned = true;
1579 rElementsOwned.insert(element_number);
1580 }
1581 else
1582 {
1583 temp_halo_nodes.insert(*node_it);
1584 }
1585 }
1586
1587 if (element_owned)
1588 {
1589 rHaloNodesOwned.insert(temp_halo_nodes.begin(), temp_halo_nodes.end());
1590 }
1591 }
1592
1593 rMeshReader.Reset();
1594
1595 /*
1596 * Once we know the offsets we can compute the permutation vector
1597 */
1598 std::vector<unsigned> local_index(PetscTools::GetNumProcs(), 0);
1599
1600 this->mNodePermutation.resize(this->GetNumNodes());
1601
1602 for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
1603 {
1604 unsigned partition = global_node_partition[node_index];
1605 assert(partition != UNASSIGNED_NODE);
1606
1607 this->mNodePermutation[node_index] = rProcessorsOffset[partition] + local_index[partition];
1608
1609 local_index[partition]++;
1610 }
1611}
1612
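// === Editor's note: minimal sketch, not part of the original Chaste source. ===
// It walks through the final permutation step above: rProcessorsOffset[p] is the first new
// global index owned by process p, and nodes assigned to p are numbered consecutively from
// that offset in the order they are first encountered. The toy partition is made up.
#if 0 // example only, excluded from compilation
#include <vector>

void ExampleOffsetsToPermutation()
{
    std::vector<unsigned> node_partition = {0, 1, 0, 1, 1}; // node -> owning process (2 processes)
    std::vector<unsigned> offsets = {0, 2};                 // process 0 owns 2 nodes, so process 1 starts at 2
    std::vector<unsigned> local_index(2, 0);
    std::vector<unsigned> permutation(5);

    for (unsigned node=0; node<5; node++)
    {
        unsigned proc = node_partition[node];
        permutation[node] = offsets[proc] + local_index[proc]++;
    }
    // permutation == {0, 2, 1, 3, 4}
}
#endif
// === End of editor's sketch. ===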
1613template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1614ChasteCuboid<SPACE_DIM> DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateBoundingBox() const
1615{
1616 ChastePoint<SPACE_DIM> my_minimum_point;
1617 ChastePoint<SPACE_DIM> my_maximum_point;
1618
1619 try
1620 {
1621 ChasteCuboid<SPACE_DIM> my_box = AbstractMesh<ELEMENT_DIM, SPACE_DIM>::CalculateBoundingBox();
1622 my_minimum_point=my_box.rGetLowerCorner();
1623 my_maximum_point=my_box.rGetUpperCorner();
1624 }
1625 // LCOV_EXCL_START
1626 catch (Exception& e)
1627 {
1628 PetscTools::ReplicateException(true);
1629 throw e;
1630
1631 }
1632 // LCOV_EXCL_STOP
1633
1634 PetscTools::ReplicateException(false);
1635
1636 c_vector<double, SPACE_DIM> global_minimum_point;
1637 c_vector<double, SPACE_DIM> global_maximum_point;
1638 MPI_Allreduce(&my_minimum_point.rGetLocation()[0], &global_minimum_point[0], SPACE_DIM, MPI_DOUBLE, MPI_MIN, PETSC_COMM_WORLD);
1639 MPI_Allreduce(&my_maximum_point.rGetLocation()[0], &global_maximum_point[0], SPACE_DIM, MPI_DOUBLE, MPI_MAX, PETSC_COMM_WORLD);
1640
1641 ChastePoint<SPACE_DIM> min(global_minimum_point);
1642 ChastePoint<SPACE_DIM> max(global_maximum_point);
1643
1644 return ChasteCuboid<SPACE_DIM>(min, max);
1645}
1646
1647template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1648unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetNearestNodeIndex(const ChastePoint<SPACE_DIM>& rTestPoint)
1649{
1650 // Call base method to find closest on local processor
1651 unsigned best_node_index = AbstractMesh<ELEMENT_DIM, SPACE_DIM>::GetNearestNodeIndex(rTestPoint);
1652
1653 // Recalculate the distance to the best node (if this process has one)
1654 double best_node_point_distance = DBL_MAX;
1655 if (best_node_index != UINT_MAX)
1656 {
1657 best_node_point_distance = norm_2(this->GetNode(best_node_index)->rGetLocation() - rTestPoint.rGetLocation());
1658 }
1659
1660
1661 // This is a handy data structure that will work with MPI_DOUBLE_INT data type.
1662 // There is no MPI_DOUBLE_UNSIGNED
1663 struct
1664 {
1665 double distance;
1666 int node_index;
1667 } value, minval;
1668
1669 value.node_index = best_node_index;
1670 value.distance = best_node_point_distance;
1671
1672 MPI_Allreduce( &value, &minval, 1, MPI_DOUBLE_INT, MPI_MINLOC, MPI_COMM_WORLD );
1673
1674 return minval.node_index;
1675}
1676
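// === Editor's note: minimal sketch, not part of the original Chaste source. ===
// It isolates the MPI reduction pattern used above: a (double, int) pair reduced with
// MPI_MINLOC gives every rank the globally smallest distance together with the index that
// was stored alongside it.
#if 0 // example only, excluded from compilation
#include <mpi.h>

int ExampleGlobalArgMin(double localDistance, int localIndex)
{
    struct { double distance; int index; } value, minval;
    value.distance = localDistance;
    value.index = localIndex;

    MPI_Allreduce(&value, &minval, 1, MPI_DOUBLE_INT, MPI_MINLOC, MPI_COMM_WORLD);
    return minval.index;
}
#endif
// === End of editor's sketch. ===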
1677template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1678c_vector<double, 2> DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateMinMaxEdgeLengths()
1679{
1680 c_vector<double, 2> local_min_max = AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::CalculateMinMaxEdgeLengths();
1681 c_vector<double, 2> global_min_max;
1682
1683 MPI_Allreduce(&local_min_max[0], &global_min_max[0], 1, MPI_DOUBLE, MPI_MIN, PETSC_COMM_WORLD);
1684 MPI_Allreduce(&local_min_max[1], &global_min_max[1], 1, MPI_DOUBLE, MPI_MAX, PETSC_COMM_WORLD);
1685
1686 return global_min_max;
1687}
1688
1689template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1690typename DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::HaloNodeIterator DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetHaloNodeIteratorBegin() const
1691{
1692 return mHaloNodes.begin();
1693}
1694
1695template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1696void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Rotate(c_matrix<double, SPACE_DIM, SPACE_DIM> rotationMatrix)
1697{
1698 // First do the extras
1699 for (unsigned i=0; i<this->mHaloNodes.size(); i++)
1700 {
1701 c_vector<double, SPACE_DIM>& r_location = this->mHaloNodes[i]->rGetModifiableLocation();
1702 r_location = prod(rotationMatrix, r_location);
1703 }
1704 // Now a copy of the base class implementation
1705 for (unsigned i=0; i<this->mNodes.size(); i++)
1706 {
1707 c_vector<double, SPACE_DIM>& r_location = this->mNodes[i]->rGetModifiableLocation();
1708 r_location = prod(rotationMatrix, r_location);
1709 }
1710}
1711
1712template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1713void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Translate(const c_vector<double, SPACE_DIM>& rDisplacement)
1714{
1715 // First do the extras
1716 for (unsigned i=0; i<this->mHaloNodes.size(); i++)
1717 {
1718 c_vector<double, SPACE_DIM>& r_location = this->mHaloNodes[i]->rGetModifiableLocation();
1719 r_location += rDisplacement;
1720 }
1721 // Now a copy of the base class implementation
1722 for (unsigned i=0; i<this->mNodes.size(); i++)
1723 {
1724 c_vector<double, SPACE_DIM>& r_location = this->mNodes[i]->rGetModifiableLocation();
1725 r_location += rDisplacement;
1726 }
1727}
1728
1729template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1730typename DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::HaloNodeIterator DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetHaloNodeIteratorEnd() const
1731{
1732 return mHaloNodes.end();
1733}
1734
1735// Explicit instantiation
1736template class DistributedTetrahedralMesh<1,1>;
1737template class DistributedTetrahedralMesh<1,2>;
1738template class DistributedTetrahedralMesh<1,3>;
1739template class DistributedTetrahedralMesh<2,2>;
1740template class DistributedTetrahedralMesh<2,3>;
1741template class DistributedTetrahedralMesh<3,3>;
1742
1743// Serialization for Boost >= 1.36
1744#include "SerializationExportWrapperForCpp.hpp"
1745EXPORT_TEMPLATE_CLASS_ALL_DIMS(DistributedTetrahedralMesh)