/* Sierra Toolkit -- "Version of the Day" snapshot */
/*------------------------------------------------------------------------*/
/* Copyright 2010 Sandia Corporation.                                     */
/* Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive    */
/* license for use of this work by or on behalf of the U.S. Government.   */
/* Export of this program may require a license from the                  */
/* United States Government.                                              */
/*------------------------------------------------------------------------*/


#include <stdexcept>
#include <sstream>
#include <algorithm>

#include <stk_util/parallel/ParallelComm.hpp>
#include <stk_util/parallel/ParallelReduce.hpp>

#include <stk_mesh/base/BulkData.hpp>
#include <stk_mesh/base/FieldData.hpp>
#include <stk_mesh/base/FieldParallel.hpp>


namespace stk_classic {
namespace mesh {

/** \brief  Communicate field data for the given ghosting.
 *
 *  For every entity in the mesh's communication list whose comm entries
 *  match this ghosting, the owning process packs the data of each field
 *  in 'fields' and sends it to each process holding a ghost copy; the
 *  receiving processes unpack directly into their local field storage.
 *
 *  Pack/unpack symmetry: sender and receiver walk the communication
 *  list in the same order and, per entity, walk 'fields' in the same
 *  order, so buffer contents line up without per-item headers.  Do not
 *  reorder these loops independently of one another.
 */
void communicate_field_data(
  const Ghosting & ghosts ,
  const std::vector< const FieldBase *> & fields )
{
  if ( fields.empty() ) { return; }  // Nothing to exchange.

  const BulkData & mesh = BulkData::get(ghosts);
  const unsigned parallel_size = mesh.parallel_size();
  const unsigned parallel_rank = mesh.parallel_rank();

  const std::vector<const FieldBase *>::const_iterator fe = fields.end();
  const std::vector<const FieldBase *>::const_iterator fb = fields.begin();
  std::vector<const FieldBase *>::const_iterator fi ;

  // Sizing for send and receive: per-processor byte counts.

  const unsigned zero = 0 ;
  std::vector<unsigned> send_size( parallel_size , zero );
  std::vector<unsigned> recv_size( parallel_size , zero );

  for ( std::vector<Entity*>::const_iterator
        i = mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    Entity & e = **i ;
    const bool owned = e.owner_rank() == parallel_rank ;

    // Total bytes all fields contribute for this entity.  A field may
    // contribute zero bytes (no data for that field on this entity);
    // such pairs are likewise skipped in the pack/unpack passes below.
    unsigned e_size = 0 ;
    for ( fi = fb ; fi != fe ; ++fi ) {
      const FieldBase & f = **fi ;
      e_size += field_data_size( f , e );
    }

    // One contribution per comm entry that belongs to this ghosting:
    // the owner sends to each ghost-holding process; a non-owner
    // receives from ec->proc (presumably the owning process -- confirm
    // against how EntityComm entries are constructed).
    for ( PairIterEntityComm ec = e.comm() ; ! ec.empty() ; ++ec ) {
      if ( ghosts.ordinal() == ec->ghost_id ) {
        if ( owned ) {
          send_size[ ec->proc ] += e_size ;
        }
        else {
          recv_size[ ec->proc ] += e_size ;
        }
      }
    }
  }

  // Allocate send and receive buffers:

  CommAll sparse ;

  {
    const unsigned * const s_size = & send_size[0] ;
    const unsigned * const r_size = & recv_size[0] ;
    // NOTE(review): 'parallel_size / 4' appears to be CommAll's bound
    // for choosing sparse vs. dense message setup -- see CommAll docs.
    sparse.allocate_buffers( mesh.parallel(), parallel_size / 4 , s_size, r_size);
  }

  // Send packing: owners pack raw field bytes, entity-major then
  // field-minor, once per matching comm entry.

  for ( std::vector<Entity*>::const_iterator
        i = mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    Entity & e = **i ;
    if ( e.owner_rank() == parallel_rank ) {

      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );

        if ( size ) {
          unsigned char * ptr =
            reinterpret_cast<unsigned char *>(field_data( f , e ));

          for ( PairIterEntityComm ec = e.comm() ; ! ec.empty() ; ++ec ) {

            if ( ghosts.ordinal() == ec->ghost_id ) {
              CommBuffer & b = sparse.send_buffer( ec->proc );
              b.pack<unsigned char>( ptr , size );
            }
          }
        }
      }
    }
  }

  // Communicate:

  sparse.communicate();

  // Unpack for recv: mirror image of the packing pass -- non-owners
  // unpack straight into their local field data.

  for ( std::vector<Entity*>::const_iterator
        i = mesh.entity_comm().begin() ;
        i != mesh.entity_comm().end() ; ++i ) {
    Entity & e = **i ;
    if ( e.owner_rank() != parallel_rank ) {

      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );

        if ( size ) {
          unsigned char * ptr =
            reinterpret_cast<unsigned char *>(field_data( f , e ));

          for ( PairIterEntityComm ec = e.comm() ; ! ec.empty() ; ++ec ) {

            if ( ghosts.ordinal() == ec->ghost_id ) {
              CommBuffer & b = sparse.recv_buffer( ec->proc );
              b.unpack<unsigned char>( ptr , size );
            }
          }
        }
      }
    }
  }
}

// Heterogeneity?
/** \brief  Communicate field data for explicit (entity, processor)
 *          send/receive lists.
 *
 *  Each (e,p) in 'domain' is packed and sent to processor p; each
 *  (e,p) in 'range' is received from processor p and unpacked into
 *  local field data.  When the caller passes the *same* vector for
 *  both 'domain' and 'range' (symmetric exchange, detected by address
 *  comparison) only owner-to-non-owner pairs participate: sends where
 *  this process owns e, receives where p owns e.
 *
 *  Pack/unpack symmetry: 'domain' order on the sender must correspond
 *  to 'range' order on the receiver -- the buffers carry raw bytes
 *  with no per-item headers.
 */
void communicate_field_data(
  ParallelMachine machine,
  const std::vector<EntityProc> & domain ,
  const std::vector<EntityProc> & range ,
  const std::vector<const FieldBase *> & fields)
{
  if ( fields.empty() ) { return; }  // Nothing to exchange.

  const unsigned parallel_size = parallel_machine_size( machine );
  const unsigned parallel_rank = parallel_machine_rank( machine );
  // Distinct domain/range vectors => asymmetric: send/recv everything.
  const bool asymmetric = & domain != & range ;

  const std::vector<const FieldBase *>::const_iterator fe = fields.end();
  const std::vector<const FieldBase *>::const_iterator fb = fields.begin();
  std::vector<const FieldBase *>::const_iterator fi ;

  // Sizing for send and receive: per-processor byte counts.

  const unsigned zero = 0 ;
  std::vector<unsigned> send_size( parallel_size , zero );
  std::vector<unsigned> recv_size( parallel_size , zero );

  std::vector<EntityProc>::const_iterator i ;

  for ( i = domain.begin() ; i != domain.end() ; ++i ) {
    Entity & e = * i->first ;
    const unsigned p = i->second ;

    if ( asymmetric || parallel_rank == e.owner_rank() ) {
      unsigned e_size = 0 ;
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        e_size += field_data_size( f , e );  // May be zero for (f,e).
      }
      send_size[ p ] += e_size ;
    }
  }

  for ( i = range.begin() ; i != range.end() ; ++i ) {
    Entity & e = * i->first ;
    const unsigned p = i->second ;

    if ( asymmetric || p == e.owner_rank() ) {
      unsigned e_size = 0 ;
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        e_size += field_data_size( f , e );
      }
      recv_size[ p ] += e_size ;
    }
  }

  // Allocate send and receive buffers:

  CommAll sparse ;

  {
    const unsigned * const s_size = & send_size[0] ;
    const unsigned * const r_size = & recv_size[0] ;
    // NOTE(review): 'parallel_size / 4' appears to be CommAll's bound
    // for choosing sparse vs. dense message setup -- see CommAll docs.
    sparse.allocate_buffers( machine, parallel_size / 4 , s_size, r_size);
  }

  // Pack for send: raw field bytes in 'domain' order, fields inner.

  for ( i = domain.begin() ; i != domain.end() ; ++i ) {
    Entity & e = * i->first ;
    const unsigned p = i->second ;

    if ( asymmetric || parallel_rank == e.owner_rank() ) {
      CommBuffer & b = sparse.send_buffer( p );
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );
        if ( size ) {
          unsigned char * ptr = reinterpret_cast<unsigned char *>(field_data( f , e ));
          b.pack<unsigned char>( ptr , size );
        }
      }
    }
  }

  // Communicate:

  sparse.communicate();

  // Unpack for recv: mirror image of the packing pass, in 'range' order.

  for ( i = range.begin() ; i != range.end() ; ++i ) {
    Entity & e = * i->first ;
    const unsigned p = i->second ;

    if ( asymmetric || p == e.owner_rank() ) {
      CommBuffer & b = sparse.recv_buffer( p );
      for ( fi = fb ; fi != fe ; ++fi ) {
        const FieldBase & f = **fi ;
        const unsigned size = field_data_size( f , e );
        if ( size ) {
          unsigned char * ptr = reinterpret_cast<unsigned char *>(field_data( f , e ));
          b.unpack<unsigned char>( ptr , size );
        }
      }
    }
  }
}

//----------------------------------------------------------------------

/** \brief  Communicate field data among processes that share entities
 *          (communication entries with ghost_id == 0).
 *
 *  The exchange is symmetric: the same per-processor byte count is
 *  used for both the send and receive buffer allocation.  This routine
 *  packs, communicates, and returns with 'sparse' holding the received
 *  buffers; the CALLER is responsible for unpacking (and may call
 *  communicate_field_data_verify_read() afterwards).
 *
 *  Packing order here is field-major, entity-minor -- the caller's
 *  unpack loop must match.
 */
void communicate_field_data(
  const BulkData & mesh ,
  const unsigned field_count ,
  const FieldBase * fields[] ,
  CommAll & sparse )
{
  const std::vector<Entity*> & entity_comm = mesh.entity_comm();

  const unsigned parallel_size = mesh.parallel_size();

  // Sizing for send and receive (identical by symmetry).

  const unsigned zero = 0 ;
  std::vector<unsigned> msg_size( parallel_size , zero );

  size_t j = 0;

  for ( j = 0 ; j < field_count ; ++j ) {
    const FieldBase & f = * fields[j] ;
    for ( std::vector<Entity*>::const_iterator
          i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
      Entity & e = **i ;
      const unsigned size = field_data_size( f , e );
      if ( size ) {
        // Iteration stops at the first entry with ghost_id != 0:
        // NOTE(review): this assumes comm entries are ordered with the
        // shared (ghost_id == 0) entries first -- confirm the ordering
        // invariant of Entity::comm().
        for ( PairIterEntityComm
              ec = e.comm() ; ! ec.empty() && ec->ghost_id == 0 ; ++ec ) {
          msg_size[ ec->proc ] += size ;
        }
      }
    }
  }

  // Allocate send and receive buffers (same sizes for both directions):

  {
    const unsigned * const s_size = & msg_size[0] ;
    sparse.allocate_buffers( mesh.parallel(), parallel_size / 4 , s_size, s_size);
  }

  // Pack for send: must traverse in exactly the same order as the
  // sizing pass above.

  for ( j = 0 ; j < field_count ; ++j ) {
    const FieldBase & f = * fields[j] ;
    for ( std::vector<Entity*>::const_iterator
          i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
      Entity & e = **i ;
      const unsigned size = field_data_size( f , e );
      if ( size ) {
        unsigned char * ptr =
          reinterpret_cast<unsigned char *>(field_data( f , e ));
        for ( PairIterEntityComm
              ec = e.comm() ; ! ec.empty() && ec->ghost_id == 0 ; ++ec ) {
          CommBuffer & b = sparse.send_buffer( ec->proc );
          b.pack<unsigned char>( ptr , size );
        }
      }
    }
  }

  // Communicate:

  sparse.communicate();
}

/** \brief  Globally verify that every receive buffer of 'sparse' was
 *          fully read.
 *
 *  The local error flag is summed across all processes, so EVERY
 *  process throws if ANY process had unread data; ranks without a
 *  local error throw with an empty message.
 */
void communicate_field_data_verify_read( CommAll & sparse )
{
  std::ostringstream msg ;
  int error = 0 ;
  for ( unsigned p = 0 ; p < sparse.parallel_size() ; ++p ) {
    if ( sparse.recv_buffer( p ).remaining() ) {
      msg << "P" << sparse.parallel_rank()
          << " Unread data from P" << p << std::endl ;
      error = 1 ;
    }
  }
  all_reduce( sparse.parallel() , ReduceSum<1>( & error ) );
  ThrowErrorMsgIf( error, msg.str() );
}

//----------------------------------------------------------------------

} // namespace mesh
} // namespace stk_classic