Tpetra Matrix/Vector Services — Version of the Day
00001 00008 // 00009 // This example includes MPI initialization, getting a Teuchos::Comm 00010 // communicator, and printing out Tpetra version information. 00011 // 00012 00013 #include <Tpetra_DefaultPlatform.hpp> 00014 #include <Tpetra_Version.hpp> 00015 #include <Teuchos_GlobalMPISession.hpp> 00016 00017 // Do something with the given communicator. In this case, we just 00018 // print Tpetra's version to stdout on Process 0. 00019 void 00020 exampleRoutine (const Teuchos::RCP<const Teuchos::Comm<int> >& comm) 00021 { 00022 if (comm->getRank () == 0) { 00023 // On (MPI) Process 0, print out the Tpetra software version. 00024 std::cout << Tpetra::version () << std::endl << std::endl; 00025 } 00026 } 00027 00028 int 00029 main (int argc, char *argv[]) 00030 { 00031 // These "using" declarations make the code more concise, in that 00032 // you don't have to write the namespace along with the class or 00033 // object name. This is especially helpful with commonly used 00034 // things like std::endl. 00035 using std::cout; 00036 using std::endl; 00037 00038 // Start up MPI, if using MPI. Trilinos doesn't have to be built 00039 // with MPI; it's called a "serial" build if you build without MPI. 00040 // GlobalMPISession hides this implementation detail. 00041 // 00042 // Note the third argument. If you pass GlobalMPISession the 00043 // address of an std::ostream, it will print a one-line status 00044 // message with the rank on each MPI process. This may be 00045 // undesirable if running with a large number of MPI processes. 00046 // You can avoid printing anything here by passing in either 00047 // NULL or the address of a Teuchos::oblackholestream. 00048 Teuchos::GlobalMPISession mpiSession (&argc, &argv, NULL); 00049 00050 // Get a pointer to the communicator object representing 00051 // MPI_COMM_WORLD. getDefaultPlatform.getComm() doesn't create a 00052 // new object every time you call it; it just returns the same 00053 // communicator each time. 
Thus, you can call it anywhere and get 00054 // the same communicator. (This is handy if you don't want to pass 00055 // a communicator around everywhere, though it's always better to 00056 // parameterize your algorithms on the communicator.) 00057 // 00058 // "Tpetra::DefaultPlatform" knows whether or not we built with MPI 00059 // support. If we didn't build with MPI, we'll get a "communicator" 00060 // with size 1, whose only process has rank 0. 00061 Teuchos::RCP<const Teuchos::Comm<int> > comm = 00062 Tpetra::DefaultPlatform::getDefaultPlatform ().getComm (); 00063 00064 // Get my process' rank, and the total number of processes. 00065 // Equivalent to MPI_Comm_rank resp. MPI_Comm_size. 00066 const int myRank = comm->getRank (); 00067 const int numProcs = comm->getSize (); 00068 00069 if (myRank == 0) { 00070 cout << "Total number of processes: " << numProcs << endl; 00071 } 00072 00073 // Do something with the new communicator. 00074 exampleRoutine (comm); 00075 00076 // This tells the Trilinos test framework that the test passed. 00077 if (myRank == 0) { 00078 cout << "End Result: TEST PASSED" << endl; 00079 } 00080 00081 // GlobalMPISession calls MPI_Finalize() in its destructor, if 00082 // appropriate. You don't have to do anything here! Just return 00083 // from main(). Isn't that helpful? 00084 return 0; 00085 }
1.7.6.1