PlayaMPIComm.hpp
Go to the documentation of this file.
00001 /* @HEADER@ */
00002 // ************************************************************************
00003 // 
00004 //                 Playa: Programmable Linear Algebra
00005 //                 Copyright 2012 Sandia Corporation
00006 // 
00007 // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
00008 // the U.S. Government retains certain rights in this software.
00009 //
00010 // Redistribution and use in source and binary forms, with or without
00011 // modification, are permitted provided that the following conditions are
00012 // met:
00013 //
00014 // 1. Redistributions of source code must retain the above copyright
00015 // notice, this list of conditions and the following disclaimer.
00016 //
00017 // 2. Redistributions in binary form must reproduce the above copyright
00018 // notice, this list of conditions and the following disclaimer in the
00019 // documentation and/or other materials provided with the distribution.
00020 //
00021 // 3. Neither the name of the Corporation nor the names of the
00022 // contributors may be used to endorse or promote products derived from
00023 // this software without specific prior written permission.
00024 //
00025 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
00026 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
00027 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
00028 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
00029 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
00030 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
00031 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
00032 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
00033 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
00034 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
00035 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00036 //
00037 // Questions? Contact Kevin Long (kevin.long@ttu.edu)
00038 // 
00039 
00040 /* @HEADER@ */
00041 
00042 #ifndef PLAYA_MPICOMM_H
00043 #define PLAYA_MPICOMM_H
00044 
00045 /*! \file PlayaMPIComm.hpp
00046   \brief Object representation of an MPI communicator
00047 */
00048 
00049 #include "PlayaDefs.hpp"
00050 #include "PlayaMPIOp.hpp"
00051 #include "PlayaMPIDataType.hpp"
00052 #include "Teuchos_ConfigDefs.hpp"
00053 #include "Teuchos_Array.hpp"
00054 #include "Teuchos_RCP.hpp"
00055 
00056 #ifdef HAVE_MPI
00057 #include "mpi.h"
00058 #endif
00059 
00060 
00061 namespace Playa
00062 {
00063 
00064 
00065 /**
00066  * \brief Object representation of an MPI communicator.
00067  *
00068  * At present, groups are not implemented so the only communicators
00069  * are MPI_COMM_WORLD and MPI_COMM_SELF
00070  */
00071 class MPIComm
00072 {
00073 public:
00074 
00075   //! Empty constructor builds an object for MPI_COMM_WORLD
00076   MPIComm();
00077 
00078 #ifdef HAVE_MPI
00079   //! Construct a MPIComm for a given MPI communicator
00080   MPIComm(MPI_Comm comm);
00081 #endif
00082 
00083   //! Get an object representing MPI_COMM_WORLD 
00084   static MPIComm& world();
00085   //! Get an object representing MPI_COMM_SELF
00086   static MPIComm& self();
00087 
00088   //! Return process rank
00089   int getRank() const {return myRank_;}
00090 
00091   //! Return number of processors in the communicator
00092   int getNProc() const {return nProc_;}
00093 
00094   //! Synchronize all the processors in the communicator
00095   void synchronize() const ;
00096 
00097   //! @name Collective communications 
00098   //@{
00099 
00100   //! All-to-all gather-scatter
00101   void allToAll(void* sendBuf, int sendCount, const MPIDataType& sendType,
00102     void* recvBuf, int recvCount, 
00103     const MPIDataType& recvType) const ;
00104 
00105   //! Variable-length gather-scatter
00106   void allToAllv(void* sendBuf, int* sendCount, int* sendDisplacements,
00107     const MPIDataType& sendType,
00108     void* recvBuf, int* recvCount,
00109     int* recvDisplacements,
00110     const MPIDataType& recvType) const ;
00111 
00112   //! Do a collective operation, scattering the results to all processors
00113   void allReduce(void* input, void* result, int inputCount, 
00114     const MPIDataType& type,
00115     const MPIOp& op) const ;
00116 
00117 
00118   //! Gather to root 
00119   void gather(void* sendBuf, int sendCount, const MPIDataType& sendType,
00120     void* recvBuf, int recvCount, const MPIDataType& recvType,
00121     int root) const ;
00122 
00123   //! Gather variable-sized arrays to root 
00124   void gatherv(void* sendBuf, int sendCount, const MPIDataType& sendType,
00125     void* recvBuf, int* recvCount, int* displacements, 
00126     const MPIDataType& recvType, int root) const ;
00127 
00128   //! Gather to all processors
00129   void allGather(void* sendBuf, int sendCount, 
00130     const MPIDataType& sendType,
00131     void* recvBuf, int recvCount, 
00132     const MPIDataType& recvType) const ;
00133 
00134   //! Variable-length gather to all processors
00135   void allGatherv(void* sendBuf, int sendCount, 
00136     const MPIDataType& sendType,
00137     void* recvBuf, int* recvCount, int* recvDisplacements,
00138     const MPIDataType& recvType) const ;
00139 
00140   //! Broadcast 
00141   void bcast(void* msg, int length, 
00142     const MPIDataType& type, int src) const ;
00143 
00144   //@}
00145 
00146 #ifdef HAVE_MPI
00147   //! Get the MPI_Comm communicator handle 
00148   MPI_Comm getComm() const {return comm_;}
00149 #endif
00150 
00151   
00152 
00153   // errCheck() checks the return value of an MPI call and throws
00154   // a ParallelException upon failure.
00155   static void errCheck(int errCode, const std::string& methodName);
00156 
00157 private:
00158 #ifdef HAVE_MPI
00159   MPI_Comm comm_;
00160 #endif
00161 
00162   int nProc_;
00163   int myRank_;
00164 
00165   /** common initialization function, called by all ctors */
00166   void init();
00167 
00168   /** Indicate whether MPI is currently running */
00169   int mpiIsRunning() const ;
00170 };
00171 }
00172 #endif
00173 

Site Contact