VtkMeshWriter.cpp

/*

Copyright (C) University of Oxford, 2005-2011

University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.

This file is part of Chaste.

Chaste is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 2.1 of the License, or
(at your option) any later version.

Chaste is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details. The offer of Chaste under the terms of the
License is subject to the License being interpreted in accordance with
English Law and subject to any action against the University of Oxford
being under the jurisdiction of the English Courts.

You should have received a copy of the GNU Lesser General Public License
along with Chaste. If not, see <http://www.gnu.org/licenses/>.

*/

#include "VtkMeshWriter.hpp"
#include "DistributedTetrahedralMesh.hpp"

#ifdef CHASTE_VTK

// Implementation
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
VtkMeshWriter<ELEMENT_DIM, SPACE_DIM>::VtkMeshWriter(const std::string& rDirectory,
                     const std::string& rBaseName,
                     const bool& rCleanDirectory)
    : AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>(rDirectory, rBaseName, rCleanDirectory),
      mWriteParallelFiles(false)
{
    this->mIndexFromZero = true;

    // Dubious, since at this point we shouldn't yet know any details of the mesh.
    mpVtkUnstructedMesh = vtkUnstructuredGrid::New();
}

template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::~VtkMeshWriter()
{
    mpVtkUnstructedMesh->Delete(); // Reference counted
}

template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::MakeVtkMesh()
{
    assert(SPACE_DIM == 3 || SPACE_DIM == 2);
    assert(SPACE_DIM == ELEMENT_DIM);
    vtkPoints* p_pts = vtkPoints::New(VTK_DOUBLE);
    p_pts->GetData()->SetName("Vertex positions");
    for (unsigned item_num=0; item_num<this->GetNumNodes(); item_num++)
    {
        std::vector<double> current_item = this->GetNextNode(); // this->mNodeData[item_num];
        if (SPACE_DIM == 2)
        {
            current_item.push_back(0.0); // For z-coordinate
        }
        assert(current_item.size() == 3);
        p_pts->InsertPoint(item_num, current_item[0], current_item[1], current_item[2]);
    }

    mpVtkUnstructedMesh->SetPoints(p_pts);
    p_pts->Delete(); // Reference counted
    for (unsigned item_num=0; item_num<this->GetNumElements(); item_num++)
    {
        std::vector<unsigned> current_element = this->GetNextElement().NodeIndices; // this->mElementData[item_num];
        assert(current_element.size() == ELEMENT_DIM + 1);
        vtkCell* p_cell = NULL;
        if (SPACE_DIM == 3)
        {
            p_cell = vtkTetra::New();
        }
        if (SPACE_DIM == 2)
        {
            p_cell = vtkTriangle::New();
        }
        vtkIdList* p_cell_id_list = p_cell->GetPointIds();
        for (unsigned j = 0; j < ELEMENT_DIM+1; ++j)
        {
            p_cell_id_list->SetId(j, current_element[j]);
        }
        mpVtkUnstructedMesh->InsertNextCell(p_cell->GetCellType(), p_cell_id_list);
        p_cell->Delete(); // Reference counted
    }
}

template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::AddProvenance(std::string fileName)
{
    std::string comment = "<!-- " + ChasteBuildInfo::GetProvenanceString() + "-->";
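    // Note: XML permits comments after the closing tag of the root element, so
    // appending this after the end of the <VTKFile> document should leave the
    // file well-formed.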

    out_stream p_vtu_file = this->mpOutputFileHandler->OpenOutputFile(fileName, std::ios::out | std::ios::app);

    *p_vtu_file << "\n" << comment << "\n";
    p_vtu_file->close();
}

template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::WriteFiles()
{
    // Using a separate scope here to make sure the file is properly closed before re-opening it to add provenance info.
    {
        MakeVtkMesh();
        assert(mpVtkUnstructedMesh->CheckAttributes() == 0);
        vtkXMLUnstructuredGridWriter* p_writer = vtkXMLUnstructuredGridWriter::New();
        p_writer->SetInput(mpVtkUnstructedMesh);
        // Uninitialised data arises in the compressor (see #1079); the valgrind
        // warnings can be avoided by disabling compression:
        // **** REMOVE WITH CAUTION *****
        p_writer->SetCompressor(NULL);
        // **** REMOVE WITH CAUTION *****
        std::string vtk_file_name = this->mpOutputFileHandler->GetOutputDirectoryFullPath() + this->mBaseName + ".vtu";
        p_writer->SetFileName(vtk_file_name.c_str());
        //p_writer->PrintSelf(std::cout, vtkIndent());
        p_writer->Write();
        p_writer->Delete(); // Reference counted
    }

    AddProvenance(this->mBaseName + ".vtu");
}

template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::AddCellData(std::string dataName, std::vector<double> dataPayload)
{
    vtkDoubleArray* p_scalars = vtkDoubleArray::New();
    p_scalars->SetName(dataName.c_str());
    for (unsigned i=0; i<dataPayload.size(); i++)
    {
        p_scalars->InsertNextValue(dataPayload[i]);
    }

    vtkCellData* p_cell_data = mpVtkUnstructedMesh->GetCellData();
    p_cell_data->AddArray(p_scalars);
    p_scalars->Delete(); // Reference counted
}

template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::AddCellData(std::string dataName, std::vector<c_vector<double, SPACE_DIM> > dataPayload)
{
    vtkDoubleArray* p_vectors = vtkDoubleArray::New();
    p_vectors->SetName(dataName.c_str());
    p_vectors->SetNumberOfComponents(3);
    for (unsigned i=0; i<dataPayload.size(); i++)
    {
        for (unsigned j=0; j<SPACE_DIM; j++)
        {
            p_vectors->InsertNextValue(dataPayload[i][j]);
        }
        // When SPACE_DIM < 3, pad the remaining components with zeros
        for (unsigned j=SPACE_DIM; j<3; j++)
        {
            p_vectors->InsertNextValue(0.0);
        }
    }

    vtkCellData* p_cell_data = mpVtkUnstructedMesh->GetCellData();
    p_cell_data->AddArray(p_vectors);
    p_vectors->Delete(); // Reference counted
}


template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::AddPointData(std::string dataName, std::vector<double> dataPayload)
{
    vtkDoubleArray* p_scalars = vtkDoubleArray::New();
    p_scalars->SetName(dataName.c_str());

    if (mWriteParallelFiles)
    {
        // In parallel, the vector passed in only contains values for the locally
        // owned nodes. To get values for the halo nodes (which are appended at the
        // end of the vector) we need to communicate with the equivalent vectors on
        // other processes.

        // Resize the payload data to include halos
        assert(dataPayload.size() == this->mpDistributedMesh->GetNumLocalNodes());
        dataPayload.resize(this->mpDistributedMesh->GetNumLocalNodes() + this->mpDistributedMesh->GetNumHaloNodes());
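        // The new halo entries are value-initialised to zero here and are then
        // filled in by the exchange below.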

        // then do the communication
        for (unsigned rank_offset = 1; rank_offset < PetscTools::GetNumProcs(); rank_offset++)
        {
            unsigned send_to      = (PetscTools::GetMyRank() + rank_offset) % (PetscTools::GetNumProcs());
            unsigned receive_from = (PetscTools::GetMyRank() + PetscTools::GetNumProcs() - rank_offset) % (PetscTools::GetNumProcs());
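            // Round-robin exchange: on iteration rank_offset each process sends to
            // rank+rank_offset and receives from rank-rank_offset (mod nprocs), so
            // every ordered pair of processes is covered exactly once and each send
            // is matched by the corresponding receive, avoiding deadlock.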

            unsigned number_of_nodes_to_send    = mNodesToSendPerProcess[send_to].size();
            unsigned number_of_nodes_to_receive = mNodesToReceivePerProcess[receive_from].size();

            // Pack
            if (number_of_nodes_to_send > 0)
            {
                // Use a std::vector rather than a variable-length array, which is a
                // compiler extension rather than standard C++
                std::vector<double> send_data(number_of_nodes_to_send);

                for (unsigned node = 0; node < number_of_nodes_to_send; node++)
                {
                    unsigned global_node_index = mNodesToSendPerProcess[send_to][node];
                    unsigned local_node_index = global_node_index
                                - this->mpDistributedMesh->GetDistributedVectorFactory()->GetLow();
                    send_data[node] = dataPayload[local_node_index];
                }

                // Send
                int ret = MPI_Send(&send_data[0],
                                   number_of_nodes_to_send,
                                   MPI_DOUBLE,
                                   send_to,
                                   0,
                                   PETSC_COMM_WORLD);
                assert(ret == MPI_SUCCESS);
            }

            if (number_of_nodes_to_receive > 0)
            {
                // Receive
                std::vector<double> receive_data(number_of_nodes_to_receive);
                MPI_Status status;

                int ret = MPI_Recv(&receive_data[0],
                                   number_of_nodes_to_receive,
                                   MPI_DOUBLE,
                                   receive_from,
                                   0,
                                   PETSC_COMM_WORLD,
                                   &status);
                assert(ret == MPI_SUCCESS);

                // Unpack
                for (unsigned node = 0; node < number_of_nodes_to_receive; node++)
                {
                    unsigned global_node_index = mNodesToReceivePerProcess[receive_from][node];
                    unsigned halo_index = mGlobalToNodeIndexMap[global_node_index];
                    assert(halo_index >= this->mpDistributedMesh->GetNumLocalNodes());
                    dataPayload[halo_index] = receive_data[node];
                }
            }
        }
    }

    for (unsigned i=0; i<dataPayload.size(); i++)
    {
        p_scalars->InsertNextValue(dataPayload[i]);
    }

    vtkPointData* p_point_data = mpVtkUnstructedMesh->GetPointData();
    p_point_data->AddArray(p_scalars);
    p_scalars->Delete(); // Reference counted
}


template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::AddPointData(std::string dataName, std::vector<c_vector<double, SPACE_DIM> > dataPayload)
{
    vtkDoubleArray* p_vectors = vtkDoubleArray::New();
    p_vectors->SetName(dataName.c_str());

    if (mWriteParallelFiles)
    {
        // In parallel, the vector passed in only contains values for the locally
        // owned nodes. To get values for the halo nodes (which are appended at the
        // end of the vector) we need to communicate with the equivalent vectors on
        // other processes.

        // Resize the payload data to include halos
        assert(dataPayload.size() == this->mpDistributedMesh->GetNumLocalNodes());
        dataPayload.resize(this->mpDistributedMesh->GetNumLocalNodes() + this->mpDistributedMesh->GetNumHaloNodes());

        // then do the communication
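        // Same round-robin halo exchange as in the scalar AddPointData above,
        // but with SPACE_DIM doubles sent per node.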
        for (unsigned rank_offset = 1; rank_offset < PetscTools::GetNumProcs(); rank_offset++)
        {
            unsigned send_to      = (PetscTools::GetMyRank() + rank_offset) % (PetscTools::GetNumProcs());
            unsigned receive_from = (PetscTools::GetMyRank() + PetscTools::GetNumProcs() - rank_offset) % (PetscTools::GetNumProcs());

            unsigned number_of_nodes_to_send    = mNodesToSendPerProcess[send_to].size();
            unsigned number_of_nodes_to_receive = mNodesToReceivePerProcess[receive_from].size();

            // Pack
            if (number_of_nodes_to_send > 0)
            {
                std::vector<double> send_data(number_of_nodes_to_send * SPACE_DIM);

                for (unsigned node = 0; node < number_of_nodes_to_send; node++)
                {
                    unsigned global_node_index = mNodesToSendPerProcess[send_to][node];
                    unsigned local_node_index = global_node_index
                                - this->mpDistributedMesh->GetDistributedVectorFactory()->GetLow();
                    for (unsigned j=0; j<SPACE_DIM; j++)
                    {
                        send_data[node*SPACE_DIM + j] = dataPayload[local_node_index][j];
                    }
                }

                // Send
                int ret = MPI_Send(&send_data[0],
                                   number_of_nodes_to_send * SPACE_DIM,
                                   MPI_DOUBLE,
                                   send_to,
                                   0,
                                   PETSC_COMM_WORLD);
                assert(ret == MPI_SUCCESS);
            }

            if (number_of_nodes_to_receive > 0)
            {
                // Receive
                std::vector<double> receive_data(number_of_nodes_to_receive * SPACE_DIM);
                MPI_Status status;

                int ret = MPI_Recv(&receive_data[0],
                                   number_of_nodes_to_receive * SPACE_DIM,
                                   MPI_DOUBLE,
                                   receive_from,
                                   0,
                                   PETSC_COMM_WORLD,
                                   &status);
                assert(ret == MPI_SUCCESS);

                // Unpack
                for (unsigned node = 0; node < number_of_nodes_to_receive; node++)
                {
                    unsigned global_node_index = mNodesToReceivePerProcess[receive_from][node];
                    unsigned halo_index = mGlobalToNodeIndexMap[global_node_index];
                    assert(halo_index >= this->mpDistributedMesh->GetNumLocalNodes());
                    for (unsigned j=0; j<SPACE_DIM; j++)
                    {
                        dataPayload[halo_index][j] = receive_data[node*SPACE_DIM + j];
                    }
                }
            }
        }
    }

    p_vectors->SetNumberOfComponents(3);
    for (unsigned i=0; i<dataPayload.size(); i++)
    {
        for (unsigned j=0; j<SPACE_DIM; j++)
        {
            p_vectors->InsertNextValue(dataPayload[i][j]);
        }
        // When SPACE_DIM < 3, pad the remaining components with zeros
        for (unsigned j=SPACE_DIM; j<3; j++)
        {
            p_vectors->InsertNextValue(0.0);
        }
    }

    vtkPointData* p_point_data = mpVtkUnstructedMesh->GetPointData();
    p_point_data->AddArray(p_vectors);
    p_vectors->Delete(); // Reference counted
}

template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::SetParallelFiles( AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>& rMesh )
{
    // Have we got a parallel mesh?
    this->mpDistributedMesh = dynamic_cast<DistributedTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>* >(&rMesh);

    if (this->mpDistributedMesh == NULL)
    {
        EXCEPTION("Cannot write parallel files using a sequential mesh");
    }

    if (PetscTools::IsSequential())
    {
        return;     // mWriteParallelFiles is not set when running sequentially (so we don't set up the data-exchange machinery)
    }

    mWriteParallelFiles = true;

    // Populate the global-to-node index map (as this will be required to add point data)

    // Node index that we are writing to VTK (index into mNodes and mHaloNodes as if they were concatenated)
    unsigned index = 0;

    // Owned nodes
    for (typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator node_iter = rMesh.GetNodeIteratorBegin();
         node_iter != rMesh.GetNodeIteratorEnd();
         ++node_iter)
    {
        mGlobalToNodeIndexMap[node_iter->GetIndex()] = index;
        index++;
    }

    // Halo nodes
    for (typename DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::HaloNodeIterator halo_iter=this->mpDistributedMesh->GetHaloNodeIteratorBegin();
         halo_iter != this->mpDistributedMesh->GetHaloNodeIteratorEnd();
         ++halo_iter)
    {
        mGlobalToNodeIndexMap[(*halo_iter)->GetIndex()] = index;
        index++;
    }

    // Calculate the halo exchange so that node-wise payloads can be communicated
    this->mpDistributedMesh->CalculateNodeExchange(mNodesToSendPerProcess, mNodesToReceivePerProcess);
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void VtkMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesUsingMesh(
      AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>& rMesh,
      bool keepOriginalElementIndexing)
{
    // Have we got a parallel mesh?
    this->mpDistributedMesh = dynamic_cast<DistributedTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>* >(&rMesh);

    if (PetscTools::IsSequential() || !mWriteParallelFiles || this->mpDistributedMesh == NULL)
    {
        AbstractTetrahedralMeshWriter<ELEMENT_DIM,SPACE_DIM>::WriteFilesUsingMesh(rMesh, keepOriginalElementIndexing);
    }
    else
    {
        // Make the local portion of the mesh (owned plus halo nodes) into a VTK mesh
        assert(SPACE_DIM == 3 || SPACE_DIM == 2);
        assert(SPACE_DIM == ELEMENT_DIM);
        vtkPoints* p_pts = vtkPoints::New(VTK_DOUBLE);
        p_pts->GetData()->SetName("Vertex positions");

        // Owned nodes
        for (typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator node_iter = rMesh.GetNodeIteratorBegin();
             node_iter != rMesh.GetNodeIteratorEnd();
             ++node_iter)
        {
            c_vector<double, SPACE_DIM> current_item = node_iter->rGetLocation();
            p_pts->InsertNextPoint(current_item[0], current_item[1], (SPACE_DIM==3)?current_item[2]:0.0);
        }

        // Halo nodes
        for (typename DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::HaloNodeIterator halo_iter=this->mpDistributedMesh->GetHaloNodeIteratorBegin();
             halo_iter != this->mpDistributedMesh->GetHaloNodeIteratorEnd();
             ++halo_iter)
        {
            c_vector<double, SPACE_DIM> current_item = (*halo_iter)->rGetLocation();
            p_pts->InsertNextPoint(current_item[0], current_item[1], (SPACE_DIM==3)?current_item[2]:0.0);
        }
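        // Halo nodes are appended after the owned nodes, matching the ordering used
        // to build mGlobalToNodeIndexMap in SetParallelFiles, so the element
        // connectivity below can refer to halo nodes by their local VTK index.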

        mpVtkUnstructedMesh->SetPoints(p_pts);
        p_pts->Delete(); // Reference counted

        for (typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::ElementIterator elem_iter = rMesh.GetElementIteratorBegin();
             elem_iter != rMesh.GetElementIteratorEnd();
             ++elem_iter)
        {
            vtkCell* p_cell = NULL;
            if (SPACE_DIM == 3)
            {
                p_cell = vtkTetra::New();
            }
            if (SPACE_DIM == 2)
            {
                p_cell = vtkTriangle::New();
            }
            vtkIdList* p_cell_id_list = p_cell->GetPointIds();
            for (unsigned j = 0; j < ELEMENT_DIM+1; ++j)
            {
                unsigned global_node_index = elem_iter->GetNodeGlobalIndex(j);
                p_cell_id_list->SetId(j, mGlobalToNodeIndexMap[global_node_index]);
            }
            mpVtkUnstructedMesh->InsertNextCell(p_cell->GetCellType(), p_cell_id_list);
            p_cell->Delete(); // Reference counted
        }
        // This block is scoped so that the mesh writer (vtkXMLPUnstructuredGridWriter)
        // goes out of scope, flushing buffers and closing files
        {
            assert(mpVtkUnstructedMesh->CheckAttributes() == 0);
            vtkXMLPUnstructuredGridWriter* p_writer = vtkXMLPUnstructuredGridWriter::New();

            p_writer->SetDataModeToBinary();

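            // Each process writes just its own piece of the distributed grid (piece
            // number == rank); the writer also produces the .pvtu master file that
            // references all of the per-process .vtu pieces.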
            p_writer->SetNumberOfPieces(PetscTools::GetNumProcs());
            //p_writer->SetGhostLevel(-1);
            p_writer->SetStartPiece(PetscTools::GetMyRank());
            p_writer->SetEndPiece(PetscTools::GetMyRank());

            p_writer->SetInput(mpVtkUnstructedMesh);
            // Uninitialised data arises in the compressor (see #1079); the valgrind
            // warnings can be avoided by disabling compression:
            // **** REMOVE WITH CAUTION *****
            p_writer->SetCompressor(NULL);
            // **** REMOVE WITH CAUTION *****
            std::string pvtk_file_name = this->mpOutputFileHandler->GetOutputDirectoryFullPath() + this->mBaseName + ".pvtu";
            p_writer->SetFileName(pvtk_file_name.c_str());
            //p_writer->PrintSelf(std::cout, vtkIndent());
            p_writer->Write();
            p_writer->Delete(); // Reference counted
        }

        // Add provenance to the individual files
        std::stringstream filepath;
        filepath << this->mBaseName << "_" << PetscTools::GetMyRank() << ".vtu";
        AddProvenance(filepath.str());
        if (PetscTools::AmMaster())
        {
            AddProvenance(this->mBaseName + ".pvtu");
        }
    }
}

// Explicit instantiation

template class VtkMeshWriter<1,1>;
template class VtkMeshWriter<1,2>;
template class VtkMeshWriter<1,3>;
template class VtkMeshWriter<2,2>; // Actually used
template class VtkMeshWriter<2,3>;
template class VtkMeshWriter<3,3>; // Actually used
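
/*
 * Example usage (a minimal sketch, not part of this file: TetrahedralMesh,
 * TrianglesMeshReader and the example mesh path below are assumed from the
 * rest of Chaste):
 *
 *     TrianglesMeshReader<3,3> mesh_reader("mesh/test/data/cube_136_elements");
 *     TetrahedralMesh<3,3> mesh;
 *     mesh.ConstructFromMeshReader(mesh_reader);
 *
 *     // One scalar per node, e.g. a solution value
 *     std::vector<double> node_data(mesh.GetNumNodes(), 1.0);
 *
 *     VtkMeshWriter<3,3> writer("TestVtkMeshWriter", "cube", false);
 *     writer.AddPointData("Node data", node_data);
 *     writer.WriteFilesUsingMesh(mesh); // writes cube.vtu to the output directory
 */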
#endif // CHASTE_VTK
