Sierra Toolkit — Version of the Day
00001 /*------------------------------------------------------------------------*/ 00002 /* Copyright 2010, 2011 Sandia Corporation. */ 00003 /* Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive */ 00004 /* license for use of this work by or on behalf of the U.S. Government. */ 00005 /* Export of this program may require a license from the */ 00006 /* United States Government. */ 00007 /*------------------------------------------------------------------------*/ 00008 00013 #include <stdexcept> 00014 #include <iostream> 00015 #include <sstream> 00016 #include <algorithm> 00017 00018 #include <stk_util/environment/ReportHandler.hpp> 00019 00020 #include <stk_util/util/StaticAssert.hpp> 00021 00022 #include <stk_util/diag/Trace.hpp> 00023 #include <stk_util/parallel/ParallelComm.hpp> 00024 #include <stk_util/parallel/ParallelReduce.hpp> 00025 00026 #include <stk_mesh/base/Bucket.hpp> 00027 #include <stk_mesh/base/BulkData.hpp> 00028 #include <stk_mesh/base/MetaData.hpp> 00029 #include <stk_mesh/base/Comm.hpp> 00030 #include <stk_mesh/base/FieldData.hpp> 00031 00032 #include <boost/foreach.hpp> 00033 00034 namespace stk_classic { 00035 namespace mesh { 00036 00037 namespace { 00038 00039 std::vector< parallel::DistributedIndex::KeySpan> 00040 convert_entity_keys_to_spans( const MetaData & meta ) 00041 { 00042 // Make sure the distributed index can handle the EntityKey 00043 00044 enum { OK = StaticAssert< 00045 SameType< EntityKey::raw_key_type, 00046 parallel::DistributedIndex::KeyType >::value >::OK }; 00047 00048 // Default constructed EntityKey has all bits set. 
00049 00050 const EntityKey invalid_key ; 00051 const EntityId min_id = 1 ; 00052 const EntityId max_id = invalid_key.id(); 00053 00054 const size_t rank_count = meta.entity_rank_count(); 00055 00056 std::vector< parallel::DistributedIndex::KeySpan> spans( rank_count ); 00057 00058 for ( size_t rank = 0 ; rank < rank_count ; ++rank ) { 00059 EntityKey key_min( rank , min_id ); 00060 EntityKey key_max( rank , max_id ); 00061 spans[rank].first = key_min.raw_key(); 00062 spans[rank].second = key_max.raw_key(); 00063 } 00064 00065 return spans ; 00066 } 00067 00068 void ensure_part_superset_consistency( const Entity& entity ) 00069 { 00070 std::ostringstream errs; 00071 PartVector parts; 00072 Bucket& bucket = entity.bucket(); 00073 bucket.supersets(parts); 00074 BOOST_FOREACH(Part* part, parts) { 00075 const PartVector& supersets = part->supersets(); 00076 BOOST_FOREACH(Part* superset, supersets) { 00077 if (!bucket.member(*superset)) { 00078 errs << " Due to being a member part " << part->name() << ", should have been a member of " << superset->name() << std::endl; 00079 } 00080 } 00081 } 00082 ThrowRequireMsg( errs.str() == "", 00083 "Entity " << print_entity_key(entity) << " has bad part list:\n" << errs.str() ); 00084 } 00085 00086 } 00087 00088 //---------------------------------------------------------------------- 00089 00090 BulkData::BulkData( MetaData & mesh_meta_data , 00091 ParallelMachine parallel , 00092 unsigned bucket_max_size , 00093 bool use_memory_pool ) 00094 : m_entities_index( parallel, convert_entity_keys_to_spans(mesh_meta_data) ), 00095 m_entity_repo(use_memory_pool), 00096 m_bucket_repository( 00097 *this, bucket_max_size, 00098 mesh_meta_data.entity_rank_count(), 00099 m_entity_repo 00100 ), 00101 m_entity_comm(), 00102 m_ghosting(), 00103 00104 m_mesh_meta_data( mesh_meta_data ), 00105 m_parallel_machine( parallel ), 00106 m_parallel_size( parallel_machine_size( parallel ) ), 00107 m_parallel_rank( parallel_machine_rank( parallel ) ), 00108 
m_sync_count( 0 ), 00109 m_sync_state( MODIFIABLE ), 00110 m_meta_data_verified( false ), 00111 m_optimize_buckets(false), 00112 m_mesh_finalized(false) 00113 { 00114 create_ghosting( "shared" ); 00115 create_ghosting( "shared_aura" ); 00116 00117 m_sync_state = SYNCHRONIZED ; 00118 } 00119 00120 BulkData::~BulkData() 00121 { 00122 try { 00123 while ( ! m_ghosting.empty() ) { 00124 delete m_ghosting.back(); 00125 m_ghosting.pop_back(); 00126 } 00127 } catch(...){} 00128 00129 try { m_entity_comm.clear(); } catch(...){} 00130 00131 } 00132 00133 //---------------------------------------------------------------------- 00134 //---------------------------------------------------------------------- 00135 00136 void BulkData::require_ok_to_modify() const 00137 { 00138 ThrowRequireMsg( m_sync_state != SYNCHRONIZED, 00139 "NOT in the ok-to-modify state" ); 00140 } 00141 00142 void BulkData::require_entity_owner( const Entity & entity , 00143 unsigned owner ) const 00144 { 00145 const bool error_not_owner = owner != entity.owner_rank() ; 00146 00147 ThrowRequireMsg( !error_not_owner, 00148 "Entity " << print_entity_key(entity) << " owner is " << 00149 entity.owner_rank() << ", expected " << owner); 00150 } 00151 00152 void BulkData::require_good_rank_and_id(EntityRank ent_rank, EntityId ent_id) const 00153 { 00154 const size_t rank_count = m_mesh_meta_data.entity_rank_count(); 00155 const bool ok_id = entity_id_valid(ent_id); 00156 const bool ok_rank = ent_rank < rank_count ; 00157 00158 ThrowRequireMsg( ok_rank, 00159 "Bad key rank: " << ent_rank << " for id " << ent_id ); 00160 00161 ThrowRequireMsg( ok_id, "Bad key id for key: " << 00162 print_entity_key(m_mesh_meta_data, EntityKey(ent_rank, ent_id) ) ); 00163 } 00164 00165 void BulkData::require_metadata_committed() const 00166 { 00167 ThrowRequireMsg( m_mesh_meta_data.is_commit(), "MetaData not committed." 
); 00168 } 00169 00170 //---------------------------------------------------------------------- 00171 00172 bool BulkData::modification_begin() 00173 { 00174 Trace_("stk_classic::mesh::BulkData::modification_begin"); 00175 00176 parallel_machine_barrier( m_parallel_machine ); 00177 00178 ThrowRequireMsg( m_mesh_finalized == false, "Unable to modifiy, BulkData has been finalized."); 00179 00180 if ( m_sync_state == MODIFIABLE && m_mesh_finalized == false ) return false ; 00181 00182 if ( ! m_meta_data_verified ) { 00183 require_metadata_committed(); 00184 00185 if (parallel_size() > 1) { 00186 verify_parallel_consistency( m_mesh_meta_data , m_parallel_machine ); 00187 } 00188 00189 m_meta_data_verified = true ; 00190 00191 m_bucket_repository.declare_nil_bucket(); 00192 } 00193 else { 00194 ++m_sync_count ; 00195 00196 // Clear out the previous transaction information 00197 // m_transaction_log.flush(); 00198 00199 m_entity_repo.clean_changes(); 00200 } 00201 00202 m_sync_state = MODIFIABLE ; 00203 00204 return true ; 00205 } 00206 00207 //---------------------------------------------------------------------- 00208 //---------------------------------------------------------------------- 00209 // The add_parts must be full ordered and consistent, 00210 // i.e. no bad parts, all supersets included, and 00211 // owner & used parts match the owner value. 
00212 00213 //---------------------------------------------------------------------- 00214 00215 Entity & BulkData::declare_entity( EntityRank ent_rank , EntityId ent_id , 00216 const PartVector & parts ) 00217 { 00218 require_ok_to_modify(); 00219 00220 require_good_rank_and_id(ent_rank, ent_id); 00221 00222 EntityKey key( ent_rank , ent_id ); 00223 TraceIfWatching("stk_classic::mesh::BulkData::declare_entity", LOG_ENTITY, key); 00224 DiagIfWatching(LOG_ENTITY, key, "declaring entity with parts " << parts); 00225 00226 std::pair< Entity * , bool > result = m_entity_repo.internal_create_entity( key ); 00227 00228 Entity* declared_entity = result.first; 00229 00230 if ( result.second ) { 00231 // A new application-created entity 00232 m_entity_repo.set_entity_owner_rank( *declared_entity, m_parallel_rank); 00233 m_entity_repo.set_entity_sync_count( *declared_entity, m_sync_count); 00234 DiagIfWatching(LOG_ENTITY, key, "new entity: " << *declared_entity); 00235 } 00236 else { 00237 // An existing entity, the owner must match. 
00238 require_entity_owner( *declared_entity , m_parallel_rank ); 00239 DiagIfWatching(LOG_ENTITY, key, "existing entity: " << *declared_entity); 00240 } 00241 00242 //------------------------------ 00243 00244 Part * const owns = & m_mesh_meta_data.locally_owned_part(); 00245 00246 std::vector<Part*> rem ; 00247 std::vector<Part*> add( parts ); 00248 add.push_back( owns ); 00249 00250 change_entity_parts( *declared_entity , add , rem ); 00251 00252 // m_transaction_log.insert_entity ( *(result.first) ); 00253 00254 return *declared_entity ; 00255 } 00256 00257 void BulkData::change_entity_id( EntityId id, Entity & entity) 00258 { 00259 require_ok_to_modify(); 00260 require_good_rank_and_id(entity.entity_rank(),id); 00261 00262 EntityKey key(entity.entity_rank(),id); 00263 EntityKey old_key = entity.key(); 00264 00265 m_entity_repo.update_entity_key(key,entity); 00266 00267 //We also need to swap the comm-vectors for these entities: 00268 entity_comm_swap(key, old_key); 00269 } 00270 00271 //---------------------------------------------------------------------- 00272 00273 // TODO Change the methods below to requirements (private, const invariant checkers) 00274 00275 // Do not allow any of the induced part memberships to explicitly 00276 // appear in the add or remove parts lists. 00277 // 1) Intersection part 00278 // 2) PartRelation target part 00279 // 3) Part that does not match the entity rank. 
00280 00281 void BulkData::internal_verify_change_parts( const MetaData & meta , 00282 const Entity & entity , 00283 const PartVector & parts ) const 00284 { 00285 const std::vector<std::string> & rank_names = meta.entity_rank_names(); 00286 const EntityRank undef_rank = InvalidEntityRank; 00287 const EntityRank entity_rank = entity.entity_rank(); 00288 00289 bool ok = true ; 00290 std::ostringstream msg ; 00291 00292 for ( PartVector::const_iterator 00293 i = parts.begin() ; i != parts.end() ; ++i ) { 00294 00295 const Part * const p = *i ; 00296 const unsigned part_rank = p->primary_entity_rank(); 00297 00298 bool intersection_ok, rel_target_ok, rank_ok; 00299 internal_basic_part_check(p, entity_rank, undef_rank, intersection_ok, rel_target_ok, rank_ok); 00300 00301 if ( !intersection_ok || !rel_target_ok || !rank_ok ) { 00302 if ( ok ) { 00303 ok = false ; 00304 msg << "change parts for entity " << print_entity_key( entity ); 00305 msg << " , { " ; 00306 } 00307 else { 00308 msg << " , " ; 00309 } 00310 00311 msg << p->name() << "[" ; 00312 if ( part_rank < rank_names.size() ) { 00313 msg << rank_names[ part_rank ]; 00314 } 00315 else { 00316 msg << part_rank ; 00317 } 00318 msg << "] " ; 00319 if ( !intersection_ok ) { msg << "is_intersection " ; } 00320 if ( !rel_target_ok ) { msg << "is_relation_target " ; } 00321 if ( !rank_ok ) { msg << "is_bad_rank " ; } 00322 } 00323 } 00324 00325 ThrowErrorMsgIf( !ok, msg.str() << "}" ); 00326 } 00327 00328 void BulkData::internal_verify_change_parts( const MetaData & meta , 00329 const Entity & entity , 00330 const OrdinalVector & parts ) const 00331 { 00332 const std::vector<std::string> & rank_names = meta.entity_rank_names(); 00333 const EntityRank undef_rank = InvalidEntityRank; 00334 const EntityRank entity_rank = entity.entity_rank(); 00335 00336 bool ok = true ; 00337 std::ostringstream msg ; 00338 00339 for ( OrdinalVector::const_iterator 00340 i = parts.begin() ; i != parts.end() ; ++i ) { 00341 00342 const 
Part * const p = meta.get_parts()[*i] ; 00343 const unsigned part_rank = p->primary_entity_rank(); 00344 00345 bool intersection_ok, rel_target_ok, rank_ok; 00346 internal_basic_part_check(p, entity_rank, undef_rank, intersection_ok, rel_target_ok, rank_ok); 00347 00348 if ( !intersection_ok || !rel_target_ok || !rank_ok ) { 00349 if ( ok ) { 00350 ok = false ; 00351 msg << "change parts for entity " << print_entity_key( entity ); 00352 msg << " , { " ; 00353 } 00354 else { 00355 msg << " , " ; 00356 } 00357 00358 msg << p->name() << "[" ; 00359 if ( part_rank < rank_names.size() ) { 00360 msg << rank_names[ part_rank ]; 00361 } 00362 else { 00363 msg << part_rank ; 00364 } 00365 msg << "] " ; 00366 if ( !intersection_ok ) { msg << "is_intersection " ; } 00367 if ( !rel_target_ok ) { msg << "is_relation_target " ; } 00368 if ( !rank_ok ) { msg << "is_bad_rank " ; } 00369 } 00370 } 00371 00372 ThrowErrorMsgIf( !ok, msg.str() << "}" ); 00373 } 00374 00375 //---------------------------------------------------------------------- 00376 00377 namespace { 00378 00379 void filter_out( std::vector<unsigned> & vec , 00380 const PartVector & parts , 00381 PartVector & removed ) 00382 { 00383 std::vector<unsigned>::iterator i , j ; 00384 i = j = vec.begin(); 00385 00386 PartVector::const_iterator ip = parts.begin() ; 00387 00388 while ( j != vec.end() && ip != parts.end() ) { 00389 Part * const p = *ip ; 00390 if ( p->mesh_meta_data_ordinal() < *j ) { ++ip ; } 00391 else if ( *j < p->mesh_meta_data_ordinal() ) { *i = *j ; ++i ; ++j ; } 00392 else { 00393 removed.push_back( p ); 00394 ++j ; 00395 ++ip ; 00396 } 00397 } 00398 00399 if ( i != j ) { vec.erase( i , j ); } 00400 } 00401 00402 void filter_out( std::vector<unsigned> & vec , 00403 const OrdinalVector & parts , 00404 OrdinalVector & removed ) 00405 { 00406 std::vector<unsigned>::iterator i , j ; 00407 i = j = vec.begin(); 00408 00409 OrdinalVector::const_iterator ip = parts.begin() ; 00410 00411 while ( j != vec.end() 
&& ip != parts.end() ) { 00412 if ( *ip < *j ) { ++ip ; } 00413 else if ( *j < *ip ) { *i = *j ; ++i ; ++j ; } 00414 else { 00415 removed.push_back( *ip ); 00416 ++j ; 00417 ++ip ; 00418 } 00419 } 00420 00421 if ( i != j ) { vec.erase( i , j ); } 00422 } 00423 00424 void merge_in( std::vector<unsigned> & vec , const PartVector & parts ) 00425 { 00426 std::vector<unsigned>::iterator i = vec.begin(); 00427 PartVector::const_iterator ip = parts.begin() ; 00428 00429 for ( ; i != vec.end() && ip != parts.end() ; ++i ) { 00430 00431 const unsigned ord = (*ip)->mesh_meta_data_ordinal(); 00432 00433 if ( ord <= *i ) { 00434 if ( ord < *i ) { i = vec.insert( i , ord ); } 00435 // Now have: ord == *i 00436 ++ip ; 00437 } 00438 } 00439 00440 for ( ; ip != parts.end() ; ++ip ) { 00441 const unsigned ord = (*ip)->mesh_meta_data_ordinal(); 00442 vec.push_back( ord ); 00443 } 00444 } 00445 00446 void merge_in( std::vector<unsigned> & vec , const OrdinalVector & parts ) 00447 { 00448 std::vector<unsigned>::iterator i = vec.begin(); 00449 OrdinalVector::const_iterator ip = parts.begin() ; 00450 00451 for ( ; i != vec.end() && ip != parts.end() ; ++i ) { 00452 00453 const unsigned ord = *ip; 00454 00455 if ( ord <= *i ) { 00456 if ( ord < *i ) { i = vec.insert( i , ord ); } 00457 // Now have: ord == *i 00458 ++ip ; 00459 } 00460 } 00461 00462 for ( ; ip != parts.end() ; ++ip ) { 00463 vec.push_back( *ip ); 00464 } 00465 } 00466 00467 } 00468 00469 // The 'add_parts' and 'remove_parts' are complete and disjoint. 00470 // Changes need to have parallel resolution during 00471 // modification_end. 
00472 00473 void BulkData::internal_change_entity_parts( 00474 Entity & entity , 00475 const PartVector & add_parts , 00476 const PartVector & remove_parts ) 00477 { 00478 TraceIfWatching("stk_classic::mesh::BulkData::internal_change_entity_parts", LOG_ENTITY, entity.key()); 00479 DiagIfWatching(LOG_ENTITY, entity.key(), "entity state: " << entity); 00480 DiagIfWatching(LOG_ENTITY, entity.key(), "add_parts: " << add_parts); 00481 DiagIfWatching(LOG_ENTITY, entity.key(), "remove_parts: " << remove_parts); 00482 00483 Bucket * const k_old = m_entity_repo.get_entity_bucket( entity ); 00484 00485 const unsigned i_old = entity.bucket_ordinal() ; 00486 00487 if ( k_old && k_old->member_all( add_parts ) && 00488 ! k_old->member_any( remove_parts ) ) { 00489 // Is already a member of all add_parts, 00490 // is not a member of any remove_parts, 00491 // thus nothing to do. 00492 return ; 00493 } 00494 00495 PartVector parts_removed ; 00496 00497 OrdinalVector parts_total ; // The final part list 00498 00499 //-------------------------------- 00500 00501 if ( k_old ) { 00502 // Keep any of the existing bucket's parts 00503 // that are not a remove part. 00504 // This will include the 'intersection' parts. 00505 // 00506 // These parts are properly ordered and unique. 
00507 00508 const std::pair<const unsigned *, const unsigned*> 00509 bucket_parts = k_old->superset_part_ordinals(); 00510 00511 const unsigned * parts_begin = bucket_parts.first; 00512 const unsigned * parts_end = bucket_parts.second; 00513 00514 const unsigned num_bucket_parts = parts_end - parts_begin; 00515 parts_total.reserve( num_bucket_parts + add_parts.size() ); 00516 parts_total.insert( parts_total.begin(), parts_begin , parts_end); 00517 00518 if ( !remove_parts.empty() ) { 00519 parts_removed.reserve(remove_parts.size()); 00520 filter_out( parts_total , remove_parts , parts_removed ); 00521 } 00522 } 00523 else { 00524 parts_total.reserve(add_parts.size()); 00525 } 00526 00527 if ( !add_parts.empty() ) { 00528 merge_in( parts_total , add_parts ); 00529 } 00530 00531 if ( parts_total.empty() ) { 00532 // Always a member of the universal part. 00533 const unsigned univ_ord = 00534 m_mesh_meta_data.universal_part().mesh_meta_data_ordinal(); 00535 parts_total.push_back( univ_ord ); 00536 } 00537 00538 //-------------------------------- 00539 // Move the entity to the new bucket. 
00540 00541 Bucket * k_new = 00542 m_bucket_repository.declare_bucket( 00543 entity.entity_rank(), 00544 parts_total.size(), 00545 & parts_total[0] , 00546 m_mesh_meta_data.get_fields() 00547 ); 00548 00549 // If changing buckets then copy its field values from old to new bucket 00550 00551 if ( k_old ) { 00552 m_bucket_repository.copy_fields( *k_new , k_new->size() , *k_old , i_old ); 00553 } 00554 else { 00555 m_bucket_repository.initialize_fields( *k_new , k_new->size() ); 00556 } 00557 00558 // Set the new bucket 00559 m_entity_repo.change_entity_bucket( *k_new, entity, k_new->size() ); 00560 m_bucket_repository.add_entity_to_bucket( entity, *k_new ); 00561 00562 // If changing buckets then remove the entity from the bucket, 00563 if ( k_old && k_old->capacity() > 0) { m_bucket_repository.remove_entity( k_old , i_old ); } 00564 00565 // Update the change counter to the current cycle. 00566 m_entity_repo.set_entity_sync_count( entity, m_sync_count ); 00567 00568 // Propagate part changes through the entity's relations. 00569 00570 internal_propagate_part_changes( entity , parts_removed ); 00571 00572 #ifndef NDEBUG 00573 //ensure_part_superset_consistency( entity ); 00574 #endif 00575 } 00576 00577 void BulkData::internal_change_entity_parts( 00578 Entity & entity , 00579 const OrdinalVector & add_parts , 00580 const OrdinalVector & remove_parts, 00581 bool always_propagate_internal_changes ) 00582 { 00583 TraceIfWatching("stk_classic::mesh::BulkData::internal_change_entity_parts", LOG_ENTITY, entity.key()); 00584 DiagIfWatching(LOG_ENTITY, entity.key(), "entity state: " << entity); 00585 DiagIfWatching(LOG_ENTITY, entity.key(), "add_parts: " << add_parts); 00586 DiagIfWatching(LOG_ENTITY, entity.key(), "remove_parts: " << remove_parts); 00587 00588 Bucket * const k_old = m_entity_repo.get_entity_bucket( entity ); 00589 00590 const unsigned i_old = entity.bucket_ordinal() ; 00591 00592 if ( k_old && k_old->member_all( add_parts ) && 00593 ! 
k_old->member_any( remove_parts ) ) { 00594 // Is already a member of all add_parts, 00595 // is not a member of any remove_parts, 00596 // thus nothing to do. 00597 return ; 00598 } 00599 00600 OrdinalVector parts_removed ; 00601 00602 OrdinalVector parts_total ; // The final part list 00603 00604 //-------------------------------- 00605 00606 if ( k_old ) { 00607 // Keep any of the existing bucket's parts 00608 // that are not a remove part. 00609 // This will include the 'intersection' parts. 00610 // 00611 // These parts are properly ordered and unique. 00612 00613 const std::pair<const unsigned *, const unsigned*> 00614 bucket_parts = k_old->superset_part_ordinals(); 00615 00616 const unsigned * parts_begin = bucket_parts.first; 00617 const unsigned * parts_end = bucket_parts.second; 00618 00619 const unsigned num_bucket_parts = parts_end - parts_begin; 00620 parts_total.reserve( num_bucket_parts + add_parts.size() ); 00621 parts_total.insert( parts_total.begin(), parts_begin , parts_end); 00622 00623 if ( !remove_parts.empty() ) { 00624 parts_removed.reserve(remove_parts.size()); 00625 filter_out( parts_total , remove_parts , parts_removed ); 00626 } 00627 } 00628 else { 00629 parts_total.reserve(add_parts.size()); 00630 } 00631 00632 if ( !add_parts.empty() ) { 00633 merge_in( parts_total , add_parts ); 00634 } 00635 00636 if ( parts_total.empty() ) { 00637 // Always a member of the universal part. 00638 const unsigned univ_ord = 00639 m_mesh_meta_data.universal_part().mesh_meta_data_ordinal(); 00640 parts_total.push_back( univ_ord ); 00641 } 00642 00643 //-------------------------------- 00644 // Move the entity to the new bucket. 
00645 00646 Bucket * k_new = 00647 m_bucket_repository.declare_bucket( 00648 entity.entity_rank(), 00649 parts_total.size(), 00650 & parts_total[0] , 00651 m_mesh_meta_data.get_fields() 00652 ); 00653 00654 // If changing buckets then copy its field values from old to new bucket 00655 00656 if ( k_old ) { 00657 m_bucket_repository.copy_fields( *k_new , k_new->size() , *k_old , i_old ); 00658 } 00659 else { 00660 m_bucket_repository.initialize_fields( *k_new , k_new->size() ); 00661 } 00662 00663 // Set the new bucket 00664 m_entity_repo.change_entity_bucket( *k_new, entity, k_new->size() ); 00665 m_bucket_repository.add_entity_to_bucket( entity, *k_new ); 00666 00667 // If changing buckets then remove the entity from the bucket, 00668 if ( k_old && k_old->capacity() > 0) { m_bucket_repository.remove_entity( k_old , i_old ); } 00669 00670 // Update the change counter to the current cycle. 00671 m_entity_repo.set_entity_sync_count( entity, m_sync_count ); 00672 00673 // Propagate part changes through the entity's relations. 00674 //(Only propagate part changes for parts which have a primary-entity-rank that matches 00675 // the entity's rank. Other parts don't get induced...) 
00676 00677 const PartVector& all_parts = m_mesh_meta_data.get_parts(); 00678 00679 OrdinalVector rank_parts_removed; 00680 for(OrdinalVector::const_iterator pr=parts_removed.begin(), prend=parts_removed.end(); pr!=prend; ++pr) { 00681 if (all_parts[*pr]->primary_entity_rank() == entity.entity_rank()) { 00682 rank_parts_removed.push_back(*pr); 00683 } 00684 } 00685 00686 if (always_propagate_internal_changes || 00687 !rank_parts_removed.empty() || !m_mesh_meta_data.get_field_relations().empty()) { 00688 internal_propagate_part_changes( entity , rank_parts_removed ); 00689 } 00690 00691 #ifndef NDEBUG 00692 //ensure_part_superset_consistency( entity ); 00693 #endif 00694 } 00695 00696 //---------------------------------------------------------------------- 00697 00698 bool BulkData::destroy_entity( Entity * & entity_in ) 00699 { 00700 Entity & entity = *entity_in ; 00701 00702 TraceIfWatching("stk_classic::mesh::BulkData::destroy_entity", LOG_ENTITY, entity.key()); 00703 DiagIfWatching(LOG_ENTITY, entity.key(), "entity state: " << entity); 00704 00705 require_ok_to_modify( ); 00706 00707 bool has_upward_relation = false ; 00708 00709 for ( PairIterRelation 00710 irel = entity.relations() ; 00711 ! irel.empty() && ! has_upward_relation ; ++irel ) { 00712 00713 has_upward_relation = entity.entity_rank() <= irel->entity_rank(); 00714 } 00715 00716 if ( has_upward_relation ) { return false ; } 00717 00718 if ( EntityLogDeleted == entity.log_query() ) { 00719 // Cannot already be destroyed. 00720 return false ; 00721 } 00722 //------------------------------ 00723 // Immediately remove it from relations and buckets. 00724 // Postpone deletion until modification_end to be sure that 00725 // 1) No attempt is made to re-create it. 00726 // 2) Parallel index is cleaned up. 00727 // 3) Parallel sharing is cleaned up. 00728 // 4) Parallel ghosting is cleaned up. 00729 // 00730 // Must clean up the parallel lists before fully deleting the entity. 
00731 00732 // It is important that relations be destroyed in reverse order so that 00733 // the higher (back) relations are destroyed first. 00734 while ( ! entity.relations().empty() ) { 00735 destroy_relation( entity , 00736 * entity.relations().back().entity(), 00737 entity.relations().back().identifier()); 00738 } 00739 00740 // We need to save these items and call remove_entity AFTER the call to 00741 // destroy_later because remove_entity may destroy the bucket 00742 // which would cause problems in m_entity_repo.destroy_later because it 00743 // makes references to the entity's original bucket. 00744 Bucket& orig_bucket = entity.bucket(); 00745 unsigned orig_bucket_ordinal = entity.bucket_ordinal(); 00746 00747 // Set the bucket to 'bucket_nil' which: 00748 // 1) has no parts at all 00749 // 2) has no field data 00750 // 3) has zero capacity 00751 // 00752 // This keeps the entity-bucket methods from catastrophically failing 00753 // with a bad bucket pointer. 00754 00755 m_entity_repo.destroy_later( entity, m_bucket_repository.get_nil_bucket() ); 00756 00757 m_bucket_repository.remove_entity( &orig_bucket , orig_bucket_ordinal ); 00758 00759 // Add destroyed entity to the transaction 00760 // m_transaction_log.delete_entity ( *entity_in ); 00761 00762 // Set the calling entity-pointer to NULL; 00763 // hopefully the user-code will clean up any outstanding 00764 // references to this entity. 
00765 00766 entity_in = NULL ; 00767 00768 return true ; 00769 } 00770 00771 //---------------------------------------------------------------------- 00772 00773 void BulkData::generate_new_entities(const std::vector<size_t>& requests, 00774 std::vector<Entity *>& requested_entities) 00775 { 00776 Trace_("stk_classic::mesh::BulkData::generate_new_entities"); 00777 00778 typedef stk_classic::parallel::DistributedIndex::KeyType KeyType; 00779 std::vector< std::vector<KeyType> > 00780 requested_key_types; 00781 m_entities_index.generate_new_keys(requests, requested_key_types); 00782 00783 //generating 'owned' entities 00784 Part * const owns = & m_mesh_meta_data.locally_owned_part(); 00785 00786 std::vector<Part*> rem ; 00787 std::vector<Part*> add; 00788 add.push_back( owns ); 00789 00790 requested_entities.clear(); 00791 unsigned cnt=0; 00792 for (std::vector< std::vector<KeyType> >::const_iterator itr = requested_key_types.begin(); itr != requested_key_types.end(); ++itr) { 00793 const std::vector<KeyType>& key_types = *itr; 00794 for (std::vector<KeyType>::const_iterator 00795 kitr = key_types.begin(); kitr != key_types.end(); ++kitr) { 00796 ++cnt; 00797 } 00798 } 00799 requested_entities.reserve(cnt); 00800 00801 for (std::vector< std::vector<KeyType> >::const_iterator itr = requested_key_types.begin(); itr != requested_key_types.end(); ++itr) { 00802 const std::vector<KeyType>& key_types = *itr; 00803 for (std::vector<KeyType>::const_iterator 00804 kitr = key_types.begin(); kitr != key_types.end(); ++kitr) { 00805 EntityKey key(&(*kitr)); 00806 std::pair<Entity *, bool> result = m_entity_repo.internal_create_entity(key); 00807 00808 //if an entity is declare with the declare_entity function in 00809 //the same modification cycle as the generate_new_entities 00810 //function, and it happens to generate a key that was declare 00811 //previously in the same cycle it is an error 00812 ThrowErrorMsgIf( ! 
result.second, 00813 "Generated " << print_entity_key(m_mesh_meta_data, key) << 00814 " which was already used in this modification cycle."); 00815 00816 // A new application-created entity 00817 00818 Entity* new_entity = result.first; 00819 00820 m_entity_repo.set_entity_owner_rank( *new_entity, m_parallel_rank); 00821 m_entity_repo.set_entity_sync_count( *new_entity, m_sync_count); 00822 00823 //add entity to 'owned' part 00824 change_entity_parts( *new_entity , add , rem ); 00825 requested_entities.push_back(new_entity); 00826 } 00827 } 00828 } 00829 00830 00831 //---------------------------------------------------------------------- 00832 //---------------------------------------------------------------------- 00833 00834 } // namespace mesh 00835 } // namespace stk_classic 00836