// SkelGIS 3.0
// skelgis/skeletons/basic_skeletons/DMatrix/apply_reduction.hpp
// (Doxygen listing header; see the generated documentation for this file.)
00001 /*! \file apply_reduction.hpp
00002  *  \brief Definitions of the skeleton object ApplyReduction. This skeleton takes a user function and a DMatrix as inputs and return a single element.
00003  */
00004 #ifndef APPLY_REDUCTION_H
00005 #define APPLY_REDUCTION_H
00006 
00007 namespace skelgis{
00008   
00009   //================================================================================
00010   //Types of user functions
00011   //================================================================================
00012   //! _ApplyReduction_Func structure
00013   /*!
00014     These objects are not directly used by the user, but the macros used by the user, to define its functions, use these. Four specializations of this structure are available.
00015     \tparam T is the type of data to store in the input DMatrix
00016     \tparam R is the overlap distance needed by the calculation for the input DMatrix.
00017     \tparam line is the parallel distribution wanted, the default value is true, then the parallel distribution will be divided along height but not along width
00018   */
00019   //-------------------------------------------------------------------------------
00020   template <typename T,int R,bool line=true> struct _ApplyReduction_Func
00021   //-------------------------------------------------------------------------------
00022   {
00023     virtual void operator()(DMatrix <T,R>&, T& ) const =0;
00024   };
00025   //================================================================================
00026   //! _ApplyReduction_Func structure
00027   /*!
00028     \tparam T is the type of data to store in the input DMatrix
00029     R overlap template parameter is not asked and specialized to 0
00030     \tparam line is the parallel distribution wanted, the default value is true, then the parallel distribution will be divided along height but not along width
00031   */
00032   //-------------------------------------------------------------------------------
00033   template <typename T> struct _ApplyReduction_Func<T,0>
00034   //-------------------------------------------------------------------------------
00035   {
00036     virtual void operator()(DMatrix<T,0>&, T&) const =0;
00037   };
00038   //================================================================================
00039   //! _ApplyReduction_Func structure
00040   /*!
00041     \tparam T is the type of data to store in the input DMatrix
00042     \tparam R is the overlap distance needed by the calculation for the input DMatrix.
00043     The line template parameter is specialized with the false value. In this case the parallel data distribution will be done along height and width (by block)
00044   */
00045   //-------------------------------------------------------------------------------
00046   template <typename T, int R> struct _ApplyReduction_Func<T,R,false>
00047   //-------------------------------------------------------------------------------
00048   {
00049     virtual void operator()(DMatrix<T,R,false>&, T &) const =0;
00050   };
00051   //================================================================================
00052   //! _ApplyReduction_Func structure
00053   /*!
00054     \tparam T is the type of data to store in the input DMatrix
00055     R overlap template parameter is not asked and specialized to 0
00056     The line template parameter is specialized with the false value. In this case the parallel data distribution will be done along height and width (by block)
00057   */
00058   //-------------------------------------------------------------------------------
00059   template <typename T> struct _ApplyReduction_Func<T,0,false>
00060   //-------------------------------------------------------------------------------
00061   {
00062     virtual void operator()(DMatrix<T,0,false>&, T&) const =0;
00063   };
00064   //-------------------------------------------------------------------------------
00065 
00066   //================================================================================
00067   //Specializations of skeleton ApplyReduction
00068   //================================================================================
  //! ApplyReduction structure
  /*!
    These objects are directly called by the user to apply a function on a DMatrix.
    The primary template is intentionally empty: the apply method is provided only
    by the four specializations below, which together appear to cover every
    combination of R and line (NOTE(review): <T,R> matches line=true by default —
    confirm no other combination is ever instantiated).
    \tparam T is the type of data to store in the input DMatrix
    \tparam R is the overlap distance needed by the calculation for the input DMatrix.
    \tparam line is the parallel distribution wanted, the default value is true, then the parallel distribution will be divided along height but not along width
  */
  //-------------------------------------------------------------------------------
  template<class T,int R,bool line=true> struct ApplyReduction{};
  //-------------------------------------------------------------------------------
00079 
00080   //================================================================================
00081   //! ApplyReduction structure
00082   /*!
00083     First specialization with line to default values.
00084     \tparam T is the type of data to store in the input DMatrix
00085     \tparam R is the overlap distance needed by the calculation for the input DMatrix.
00086   */
00087   //-------------------------------------------------------------------------------
00088   template<class T,int R> struct ApplyReduction<T,R>
00089   //-------------------------------------------------------------------------------
00090   {
00091     static T apply(const _ApplyReduction_Func<T,R>& func, DMatrix<T,R>& m)
00092     {
00093       T res;
00094       iterator<T,R> it = m.begin();
00095       MPI_Status status;
00096       int MyOwnTag = Mpi_::mpi_nb;
00097 
00098       //-----exchanges of data
00099       m.getBorders();
00100       //-----
00101       //-----call to f
00102       func(m,res);
00103       //-----
00104 
00105       if(MyOwnTag>1)
00106         {
00107           if (Mpi_::mpi_rank==0)
00108             {
00109               HEADER h=m.getHeader();
00110               h.width=Mpi_::mpi_nb;
00111               h.height=1;
00112               
00113               DMatrix<T,R> mRes(h,1,true);
00114               //affectation of the proc 0 to the temporary result matrix
00115               it = mRes.begin();
00116               mRes[it] = res;
00117               ++it;
00118               
00119               T msg;
00120               for (int i=1; i < Mpi_::mpi_nb; i++)
00121                 {
00122                   char * get = new char[sizeof(T)];
00123                   //receive result from other proc , other than proc0
00124                   MPI_Recv(get, sizeof(T), MPI_BYTE, i, MyOwnTag, MPI_COMM_WORLD, &status);
00125                   memcpy(&msg, get, sizeof(T));
00126                   delete [] get;
00127                   
00128                   mRes[it] = msg;
00129                   ++it;
00130                 }
00131               //the temporary matrix now is analysed by the user function, to get only one output result
00132               func(mRes,res);
00133             }
00134           else
00135             MPI_Send(reinterpret_cast<char *>(&res), sizeof(T), MPI_BYTE, 0, MyOwnTag, MPI_COMM_WORLD);
00136         }
00137 
00138       char * get_glob = new char[sizeof(T)];
00139       get_glob = reinterpret_cast<char *>(&res);
00140       MPI_Bcast(get_glob,sizeof(T),MPI_BYTE,0,MPI_COMM_WORLD);
00141       memcpy(&res, get_glob, sizeof(T));
00142 
00143       return res;
00144     }
00145   };
00146   //-------------------------------------------------------------------------------
00147   
00148   //================================================================================
00149   //! ApplyReduction structure
00150   /*!
00151     Second specialization with line to default values and R=0
00152     \tparam T is the type of data to store in the input DMatrix
00153   */
00154   //-------------------------------------------------------------------------------
00155   template<class T> struct ApplyReduction<T,0>
00156   //-------------------------------------------------------------------------------
00157   {
00158     inline static T apply(const _ApplyReduction_Func<T,0>& func, DMatrix<T,0>& m)
00159     {
00160       T res;
00161       iterator<T,0> it = m.begin();
00162       MPI_Status status;
00163       int MyOwnTag = Mpi_::mpi_nb;
00164 
00165       //-----call to f
00166       func(m,res);
00167       //-----
00168 
00169       if(MyOwnTag>1)
00170         {
00171           if (Mpi_::mpi_rank==0)
00172             {
00173               HEADER h=m.getHeader();
00174               h.width=Mpi_::mpi_nb;
00175               h.height=1;
00176               
00177               DMatrix<T,0> mRes(h,1,true);
00178               //affectation of the proc 0 to the temporary result matrix
00179               it = mRes.begin();
00180               mRes[it] = res;
00181               ++it;
00182               
00183               T msg;
00184               for (int i=1; i < Mpi_::mpi_nb; i++)
00185                 {
00186                   char * get = new char[sizeof(T)];
00187                   //receive result from other proc , other than proc0
00188                   MPI_Recv(get, sizeof(T), MPI_BYTE, i, MyOwnTag, MPI_COMM_WORLD, &status);
00189                   memcpy(&msg, get, sizeof(T));
00190                   delete [] get;
00191                   
00192                   mRes[it] = msg;
00193                   ++it;
00194                 }
00195               //the temporary matrix now is analysed by the user function, to get only one output result
00196               func(mRes,res);
00197             }
00198           else
00199             MPI_Send(reinterpret_cast<char *>(&res), sizeof(T), MPI_BYTE, 0, MyOwnTag, MPI_COMM_WORLD);
00200         }
00201 
00202       char * get_glob = new char[sizeof(T)];
00203       get_glob = reinterpret_cast<char *>(&res);
00204       MPI_Bcast(get_glob,sizeof(T),MPI_BYTE,0,MPI_COMM_WORLD);
00205       memcpy(&res, get_glob, sizeof(T));
00206 
00207       return res;
00208     }
00209   };
00210   //-------------------------------------------------------------------------------
00211 
00212   //================================================================================
00213   //! ApplyReduction structure
00214   /*!
00215     Third specialization with line=false
00216     \tparam T is the type of data to store in the input DMatrix
00217     \tparam R is the overlap distance needed by the calculation for the input DMatrix.
00218   */
00219   //-------------------------------------------------------------------------------
00220   template<class T,int R> struct ApplyReduction<T,R,false>
00221   //-------------------------------------------------------------------------------
00222   {
00223     static T apply(const _ApplyReduction_Func<T,R,false>& func, DMatrix<T,R,false>& m)
00224     {
00225       T res;
00226       iterator<T,R> it = m.begin();
00227       MPI_Status status;
00228       int MyOwnTag = Mpi_::mpi_nb;
00229 
00230       //-----exchanges of data
00231       m.getBorders();
00232       //-----
00233       //-----call to f
00234       func(m,res);
00235       //-----
00236 
00237       if(MyOwnTag>1)
00238         {
00239           if (Mpi_::mpi_rank==0)
00240             {
00241               HEADER h=m.getHeader();
00242               h.width=Mpi_::mpi_nb;
00243               h.height=1;
00244               
00245               DMatrix<T,R,false> mRes(h,1,true);
00246               //affectation of the proc 0 to the temporary result matrix
00247               it = mRes.begin();
00248               mRes[it] = res;
00249               ++it;
00250               
00251               T msg;
00252               for (int i=1; i < Mpi_::mpi_nb; i++)
00253                 {
00254                   char * get = new char[sizeof(T)];
00255                   //receive result from other proc , other than proc0
00256                   MPI_Recv(get, sizeof(T), MPI_BYTE, i, MyOwnTag, MPI_COMM_WORLD, &status);
00257                   memcpy(&msg, get, sizeof(T));
00258                   delete [] get;
00259                   
00260                   mRes[it] = msg;
00261                   ++it;
00262                 }
00263               //the temporary matrix now is analysed by the user function, to get only one output result
00264               func(mRes,res);
00265             }
00266           else
00267             MPI_Send(reinterpret_cast<char *>(&res), sizeof(T), MPI_BYTE, 0, MyOwnTag, MPI_COMM_WORLD);
00268         }
00269 
00270       char * get_glob = new char[sizeof(T)];
00271       get_glob = reinterpret_cast<char *>(&res);
00272       MPI_Bcast(get_glob,sizeof(T),MPI_BYTE,0,MPI_COMM_WORLD);
00273       memcpy(&res, get_glob, sizeof(T));
00274 
00275       return res;
00276     }
00277   };
00278   //-------------------------------------------------------------------------------
00279 
00280   //================================================================================
00281   //! ApplyReduction structure
00282   /*!
00283     Fourth specialization with line=false and R=0
00284     \tparam T is the type of data to store in the input DMatrix
00285   */
00286   //-------------------------------------------------------------------------------
00287   template<class T> struct ApplyReduction<T,0,false>
00288   //-------------------------------------------------------------------------------
00289   {
00290     inline static T apply(const _ApplyReduction_Func<T,0,false>& func, DMatrix<T,0,false>& m)
00291     {
00292       T res;
00293       iterator<T,0> it = m.begin();
00294       MPI_Status status;
00295       int MyOwnTag = Mpi_::mpi_nb;
00296 
00297       //-----call to f
00298       func(m,res);
00299       //-----
00300 
00301       if(MyOwnTag>1)
00302         {
00303           if (Mpi_::mpi_rank==0)
00304             {
00305               HEADER h=m.getHeader();
00306               h.width=Mpi_::mpi_nb;
00307               h.height=1;
00308               
00309               DMatrix<T,0,false> mRes(h,1,true);
00310               //affectation of the proc 0 to the temporary result matrix
00311               it = mRes.begin();
00312               mRes[it] = res;
00313               ++it;
00314               
00315               T msg;
00316               for (int i=1; i < Mpi_::mpi_nb; i++)
00317                 {
00318                   char * get = new char[sizeof(T)];
00319                   //receive result from other proc , other than proc0
00320                   MPI_Recv(get, sizeof(T), MPI_BYTE, i, MyOwnTag, MPI_COMM_WORLD, &status);
00321                   memcpy(&msg, get, sizeof(T));
00322                   delete [] get;
00323                   
00324                   mRes[it] = msg;
00325                   ++it;
00326                 }
00327               //the temporary matrix now is analysed by the user function, to get only one output result
00328               func(mRes,res);
00329             }
00330           else
00331             MPI_Send(reinterpret_cast<char *>(&res), sizeof(T), MPI_BYTE, 0, MyOwnTag, MPI_COMM_WORLD);
00332         }
00333 
00334       char * get_glob = new char[sizeof(T)];
00335       get_glob = reinterpret_cast<char *>(&res);
00336       MPI_Bcast(get_glob,sizeof(T),MPI_BYTE,0,MPI_COMM_WORLD);
00337       memcpy(&res, get_glob, sizeof(T));
00338 
00339       return res;
00340     }
00341   };
00342   //-------------------------------------------------------------------------------
00343 
00344 }
00345 
00346 #endif
// Doxygen index links: All Classes | Files | Functions | Variables | Defines