// Kokkos Core Kernels Package — "Version of the Day" snapshot.
00001 /* 00002 //@HEADER 00003 // ************************************************************************ 00004 // 00005 // Kokkos: Manycore Performance-Portable Multidimensional Arrays 00006 // Copyright (2012) Sandia Corporation 00007 // 00008 // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, 00009 // the U.S. Government retains certain rights in this software. 00010 // 00011 // Redistribution and use in source and binary forms, with or without 00012 // modification, are permitted provided that the following conditions are 00013 // met: 00014 // 00015 // 1. Redistributions of source code must retain the above copyright 00016 // notice, this list of conditions and the following disclaimer. 00017 // 00018 // 2. Redistributions in binary form must reproduce the above copyright 00019 // notice, this list of conditions and the following disclaimer in the 00020 // documentation and/or other materials provided with the distribution. 00021 // 00022 // 3. Neither the name of the Corporation nor the names of the 00023 // contributors may be used to endorse or promote products derived from 00024 // this software without specific prior written permission. 00025 // 00026 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY 00027 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 00028 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 00029 // PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL SANDIA CORPORATION OR THE 00030 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 00031 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 00032 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 00033 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 00034 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 00035 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 00036 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 00037 // 00038 // Questions? Contact H. Carter Edwards (hcedwar@sandia.gov) 00039 // 00040 // ************************************************************************ 00041 //@HEADER 00042 */ 00043 00049 00050 #ifndef KOKKOS_UNORDERED_MAP_HPP 00051 #define KOKKOS_UNORDERED_MAP_HPP 00052 00053 #include <Kokkos_Core.hpp> 00054 #include <Kokkos_Functional.hpp> 00055 00056 #include <Kokkos_Bitset.hpp> 00057 00058 #include <impl/Kokkos_Traits.hpp> 00059 #include <impl/Kokkos_UnorderedMap_impl.hpp> 00060 00061 00062 #include <iostream> 00063 00064 #include <stdint.h> 00065 #include <stdexcept> 00066 00067 00068 namespace Kokkos { 00069 00070 enum { UnorderedMapInvalidIndex = ~0u }; 00071 00085 00086 class UnorderedMapInsertResult 00087 { 00088 private: 00089 enum Status{ 00090 SUCCESS = 1u << 31 00091 , EXISTING = 1u << 30 00092 , FREED_EXISTING = 1u << 29 00093 , LIST_LENGTH_MASK = ~(SUCCESS | EXISTING | FREED_EXISTING) 00094 }; 00095 00096 public: 00098 KOKKOS_FORCEINLINE_FUNCTION 00099 bool success() const { return (m_status & SUCCESS); } 00100 00102 KOKKOS_FORCEINLINE_FUNCTION 00103 bool existing() const { return (m_status & EXISTING); } 00104 00106 KOKKOS_FORCEINLINE_FUNCTION 00107 bool failed() const { return m_index == UnorderedMapInvalidIndex; } 00108 00111 KOKKOS_FORCEINLINE_FUNCTION 00112 bool freed_existing() const { return (m_status & FREED_EXISTING); } 00113 00116 
KOKKOS_FORCEINLINE_FUNCTION 00117 uint32_t list_position() const { return (m_status & LIST_LENGTH_MASK); } 00118 00120 KOKKOS_FORCEINLINE_FUNCTION 00121 uint32_t index() const { return m_index; } 00122 00123 KOKKOS_FORCEINLINE_FUNCTION 00124 UnorderedMapInsertResult() 00125 : m_index(UnorderedMapInvalidIndex) 00126 , m_status(0) 00127 {} 00128 00129 KOKKOS_FORCEINLINE_FUNCTION 00130 void increment_list_position() 00131 { 00132 m_status += (list_position() < LIST_LENGTH_MASK) ? 1u : 0u; 00133 } 00134 00135 KOKKOS_FORCEINLINE_FUNCTION 00136 void set_existing(uint32_t i, bool arg_freed_existing) 00137 { 00138 m_index = i; 00139 m_status = EXISTING | (arg_freed_existing ? FREED_EXISTING : 0u) | list_position(); 00140 } 00141 00142 KOKKOS_FORCEINLINE_FUNCTION 00143 void set_success(uint32_t i) 00144 { 00145 m_index = i; 00146 m_status = SUCCESS | list_position(); 00147 } 00148 00149 private: 00150 uint32_t m_index; 00151 uint32_t m_status; 00152 }; 00153 00209 template < typename Key 00210 , typename Value 00211 , typename Device = Kokkos::DefaultExecutionSpace 00212 , typename Hasher = pod_hash<typename Impl::remove_const<Key>::type> 00213 , typename EqualTo = pod_equal_to<typename Impl::remove_const<Key>::type> 00214 > 00215 class UnorderedMap 00216 { 00217 public: 00219 00220 00221 //key_types 00222 typedef Key declared_key_type; 00223 typedef typename Impl::remove_const<declared_key_type>::type key_type; 00224 typedef typename Impl::add_const<key_type>::type const_key_type; 00225 00226 //value_types 00227 typedef Value declared_value_type; 00228 typedef typename Impl::remove_const<declared_value_type>::type value_type; 00229 typedef typename Impl::add_const<value_type>::type const_value_type; 00230 00231 typedef Device device_type; 00232 typedef Hasher hasher_type; 00233 typedef EqualTo equal_to_type; 00234 typedef uint32_t size_type; 00235 00236 //map_types 00237 typedef UnorderedMap<declared_key_type,declared_value_type,device_type,hasher_type,equal_to_type> 
declared_map_type; 00238 typedef UnorderedMap<key_type,value_type,device_type,hasher_type,equal_to_type> insertable_map_type; 00239 typedef UnorderedMap<const_key_type,value_type,device_type,hasher_type,equal_to_type> modifiable_map_type; 00240 typedef UnorderedMap<const_key_type,const_value_type,device_type,hasher_type,equal_to_type> const_map_type; 00241 00242 static const bool is_set = Impl::is_same<void,value_type>::value; 00243 static const bool has_const_key = Impl::is_same<const_key_type,declared_key_type>::value; 00244 static const bool has_const_value = is_set || Impl::is_same<const_value_type,declared_value_type>::value; 00245 00246 static const bool is_insertable_map = !has_const_key && (is_set || !has_const_value); 00247 static const bool is_modifiable_map = has_const_key && !has_const_value; 00248 static const bool is_const_map = has_const_key && has_const_value; 00249 00250 00251 typedef UnorderedMapInsertResult insert_result; 00252 00253 /*typedef typename Device::host_mirror_device_type host_mirror_device_type; 00254 00255 typedef UnorderedMap<Key,Value,host_mirror_device_type,Hasher,EqualTo> HostMirror;*/ 00256 00257 typedef Impl::UnorderedMapHistogram<const_map_type> histogram_type; 00258 00260 00261 private: 00262 enum { invalid_index = ~static_cast<size_type>(0) }; 00263 00264 typedef typename Impl::if_c< is_set, int, declared_value_type>::type impl_value_type; 00265 00266 typedef typename Impl::if_c< is_insertable_map 00267 , View< key_type *, device_type> 00268 , View< const key_type *, device_type, MemoryTraits<RandomAccess> > 00269 >::type key_type_view; 00270 00271 typedef typename Impl::if_c< is_insertable_map || is_modifiable_map 00272 , View< impl_value_type *, device_type> 00273 , View< const impl_value_type *, device_type, MemoryTraits<RandomAccess> > 00274 >::type value_type_view; 00275 00276 typedef typename Impl::if_c< is_insertable_map 00277 , View< size_type *, device_type> 00278 , View< const size_type *, device_type, 
MemoryTraits<RandomAccess> > 00279 >::type size_type_view; 00280 00281 typedef typename Impl::if_c< is_insertable_map 00282 , Bitset< device_type > 00283 , ConstBitset< device_type> 00284 >::type bitset_type; 00285 00286 enum { modified_idx = 0, erasable_idx = 1, failed_insert_idx = 2 }; 00287 enum { num_scalars = 3 }; 00288 typedef View< int[num_scalars], LayoutLeft, device_type> scalars_view; 00289 00290 public: 00292 00293 00294 UnorderedMap() 00295 : m_bounded_insert() 00296 , m_hasher() 00297 , m_equal_to() 00298 , m_size() 00299 , m_available_indexes() 00300 , m_hash_lists() 00301 , m_next_index() 00302 , m_keys() 00303 , m_values() 00304 , m_scalars() 00305 {} 00306 00312 UnorderedMap( size_type capacity_hint, hasher_type hasher = hasher_type(), equal_to_type equal_to = equal_to_type() ) 00313 : m_bounded_insert(true) 00314 , m_hasher(hasher) 00315 , m_equal_to(equal_to) 00316 , m_size() 00317 , m_available_indexes(calculate_capacity(capacity_hint)) 00318 , m_hash_lists(AllocateWithoutInitializing(), "UnorderedMap hash list", Impl::find_hash_size(capacity())) 00319 , m_next_index(AllocateWithoutInitializing(), "UnorderedMap next index", capacity()+1) // +1 so that the *_at functions can always return a valid reference 00320 , m_keys("UnorderedMap keys",capacity()+1) 00321 , m_values("UnorderedMap values",(is_set? 1 : capacity()+1)) 00322 , m_scalars("UnorderedMap scalars") 00323 { 00324 if (!is_insertable_map) { 00325 throw std::runtime_error("Cannot construct a non-insertable (i.e. 
const key_type) unordered_map"); 00326 } 00327 00328 Kokkos::deep_copy(m_hash_lists, invalid_index); 00329 Kokkos::deep_copy(m_next_index, invalid_index); 00330 } 00331 00332 void reset_failed_insert_flag() 00333 { 00334 reset_flag(failed_insert_idx); 00335 } 00336 00337 histogram_type get_histogram() 00338 { 00339 return histogram_type(*this); 00340 } 00341 00343 void clear() 00344 { 00345 m_bounded_insert = true; 00346 00347 if (capacity() == 0) return; 00348 00349 m_available_indexes.clear(); 00350 00351 Kokkos::deep_copy(m_hash_lists, invalid_index); 00352 Kokkos::deep_copy(m_next_index, invalid_index); 00353 { 00354 const key_type tmp = key_type(); 00355 Kokkos::deep_copy(m_keys,tmp); 00356 } 00357 if (is_set){ 00358 const impl_value_type tmp = impl_value_type(); 00359 Kokkos::deep_copy(m_values,tmp); 00360 } 00361 { 00362 Kokkos::deep_copy(m_scalars, 0); 00363 } 00364 } 00365 00376 bool rehash(size_type requested_capacity = 0) 00377 { 00378 const bool bounded_insert = (capacity() == 0) || (size() == 0u); 00379 return rehash(requested_capacity, bounded_insert ); 00380 } 00381 00382 bool rehash(size_type requested_capacity, bool bounded_insert) 00383 { 00384 if(!is_insertable_map) return false; 00385 00386 const size_type curr_size = size(); 00387 requested_capacity = (requested_capacity < curr_size) ? 
curr_size : requested_capacity; 00388 00389 insertable_map_type tmp(requested_capacity, m_hasher, m_equal_to); 00390 00391 if (curr_size) { 00392 tmp.m_bounded_insert = false; 00393 Impl::UnorderedMapRehash<insertable_map_type> f(tmp,*this); 00394 f.apply(); 00395 } 00396 tmp.m_bounded_insert = bounded_insert; 00397 00398 *this = tmp; 00399 00400 return true; 00401 } 00402 00410 size_type size() const 00411 { 00412 if( capacity() == 0u ) return 0u; 00413 if (modified()) { 00414 m_size = m_available_indexes.count(); 00415 reset_flag(modified_idx); 00416 } 00417 return m_size; 00418 } 00419 00425 bool failed_insert() const 00426 { 00427 return get_flag(failed_insert_idx); 00428 } 00429 00430 bool erasable() const 00431 { 00432 return is_insertable_map ? get_flag(erasable_idx) : false; 00433 } 00434 00435 bool begin_erase() 00436 { 00437 bool result = !erasable(); 00438 if (is_insertable_map && result) { 00439 device_type::fence(); 00440 set_flag(erasable_idx); 00441 device_type::fence(); 00442 } 00443 return result; 00444 } 00445 00446 bool end_erase() 00447 { 00448 bool result = erasable(); 00449 if (is_insertable_map && result) { 00450 device_type::fence(); 00451 Impl::UnorderedMapErase<declared_map_type> f(*this); 00452 f.apply(); 00453 device_type::fence(); 00454 reset_flag(erasable_idx); 00455 } 00456 return result; 00457 } 00458 00463 KOKKOS_FORCEINLINE_FUNCTION 00464 size_type capacity() const 00465 { return m_available_indexes.size(); } 00466 00477 KOKKOS_INLINE_FUNCTION 00478 size_type hash_capacity() const 00479 { return m_hash_lists.size(); } 00480 00481 //--------------------------------------------------------------------------- 00482 //--------------------------------------------------------------------------- 00483 00484 00493 KOKKOS_INLINE_FUNCTION 00494 insert_result insert(key_type const& k, impl_value_type const&v = impl_value_type()) const 00495 { 00496 insert_result result; 00497 00498 if ( !is_insertable_map || capacity() == 0u || 
m_scalars((int)erasable_idx) ) { 00499 return result; 00500 } 00501 00502 if ( !m_scalars((int)modified_idx) ) { 00503 m_scalars((int)modified_idx) = true; 00504 } 00505 00506 int volatile & failed_insert_ref = m_scalars((int)failed_insert_idx) ; 00507 00508 const size_type hash_value = m_hasher(k); 00509 const size_type hash_list = hash_value % m_hash_lists.size(); 00510 00511 size_type * curr_ptr = & m_hash_lists[ hash_list ]; 00512 size_type new_index = invalid_index ; 00513 00514 // Force integer multiply to long 00515 size_type index_hint = static_cast<size_type>( (static_cast<double>(hash_list) * capacity()) / m_hash_lists.size()); 00516 00517 size_type find_attempts = 0; 00518 00519 enum { bounded_find_attempts = 32u }; 00520 const size_type max_attempts = (m_bounded_insert && (bounded_find_attempts < m_available_indexes.max_hint()) ) ? 00521 bounded_find_attempts : 00522 m_available_indexes.max_hint(); 00523 00524 bool not_done = true ; 00525 00526 #if defined( __MIC__ ) 00527 #pragma noprefetch 00528 #endif 00529 while ( not_done ) { 00530 00531 // Continue searching the unordered list for this key, 00532 // list will only be appended during insert phase. 00533 // Need volatile_load as other threads may be appending. 00534 size_type curr = volatile_load(curr_ptr); 00535 00536 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]); 00537 #if defined( __MIC__ ) 00538 #pragma noprefetch 00539 #endif 00540 while ( curr != invalid_index && ! m_equal_to( volatile_load(&m_keys[curr]), k) ) { 00541 result.increment_list_position(); 00542 index_hint = curr; 00543 curr_ptr = &m_next_index[curr]; 00544 curr = volatile_load(curr_ptr); 00545 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]); 00546 } 00547 00548 //------------------------------------------------------------ 00549 // If key already present then return that index. 
00550 if ( curr != invalid_index ) { 00551 00552 const bool free_existing = new_index != invalid_index; 00553 if ( free_existing ) { 00554 // Previously claimed an unused entry that was not inserted. 00555 // Release this unused entry immediately. 00556 if (!m_available_indexes.reset(new_index) ) { 00557 printf("Unable to free existing\n"); 00558 } 00559 00560 } 00561 00562 result.set_existing(curr, free_existing); 00563 not_done = false ; 00564 } 00565 //------------------------------------------------------------ 00566 // Key is not currently in the map. 00567 // If the thread has claimed an entry try to insert now. 00568 else { 00569 00570 //------------------------------------------------------------ 00571 // If have not already claimed an unused entry then do so now. 00572 if (new_index == invalid_index) { 00573 00574 bool found = false; 00575 // use the hash_list as the flag for the search direction 00576 Kokkos::tie(found, index_hint) = m_available_indexes.find_any_unset_near( index_hint, hash_list ); 00577 00578 // found and index and this thread set it 00579 if ( !found && ++find_attempts >= max_attempts ) { 00580 failed_insert_ref = true; 00581 not_done = false ; 00582 } 00583 else if (m_available_indexes.set(index_hint) ) { 00584 new_index = index_hint; 00585 // Set key and value 00586 KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_keys[new_index]); 00587 m_keys[new_index] = k ; 00588 00589 if (!is_set) { 00590 KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_values[new_index]); 00591 m_values[new_index] = v ; 00592 } 00593 00594 // Do not proceed until key and value are updated in global memory 00595 memory_fence(); 00596 } 00597 } 00598 else if (failed_insert_ref) { 00599 not_done = false; 00600 } 00601 00602 // Attempt to append claimed entry into the list. 00603 // Another thread may also be trying to append the same list so protect with atomic. 
00604 if ( new_index != invalid_index && 00605 curr == atomic_compare_exchange(curr_ptr, static_cast<size_type>(invalid_index), new_index) ) { 00606 // Succeeded in appending 00607 result.set_success(new_index); 00608 not_done = false ; 00609 } 00610 } 00611 } // while ( not_done ) 00612 00613 return result ; 00614 } 00615 00616 KOKKOS_INLINE_FUNCTION 00617 bool erase(key_type const& k) const 00618 { 00619 bool result = false; 00620 00621 if(is_insertable_map && 0u < capacity() && m_scalars((int)erasable_idx)) { 00622 00623 if ( ! m_scalars((int)modified_idx) ) { 00624 m_scalars((int)modified_idx) = true; 00625 } 00626 00627 size_type index = find(k); 00628 if (valid_at(index)) { 00629 m_available_indexes.reset(index); 00630 result = true; 00631 } 00632 } 00633 00634 return result; 00635 } 00636 00644 KOKKOS_INLINE_FUNCTION 00645 size_type find( const key_type & k) const 00646 { 00647 size_type curr = 0u < capacity() ? m_hash_lists( m_hasher(k) % m_hash_lists.size() ) : invalid_index ; 00648 00649 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]); 00650 while (curr != invalid_index && !m_equal_to( m_keys[curr], k) ) { 00651 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]); 00652 curr = m_next_index[curr]; 00653 } 00654 00655 return curr; 00656 } 00657 00662 KOKKOS_INLINE_FUNCTION 00663 bool exists( const key_type & k) const 00664 { 00665 return valid_at(find(k)); 00666 } 00667 00668 00677 KOKKOS_FORCEINLINE_FUNCTION 00678 typename Impl::if_c< (is_set || has_const_value), impl_value_type, impl_value_type &>::type 00679 value_at(size_type i) const 00680 { 00681 return m_values[ is_set ? 0 : (i < capacity() ? i : capacity()) ]; 00682 } 00683 00690 KOKKOS_FORCEINLINE_FUNCTION 00691 key_type key_at(size_type i) const 00692 { 00693 return m_keys[ i < capacity() ? 
i : capacity() ]; 00694 } 00695 00696 KOKKOS_FORCEINLINE_FUNCTION 00697 bool valid_at(size_type i) const 00698 { 00699 return m_available_indexes.test(i); 00700 } 00701 00702 template <typename SKey, typename SValue> 00703 UnorderedMap( UnorderedMap<SKey,SValue,Device,Hasher,EqualTo> const& src, 00704 typename Impl::enable_if< Impl::UnorderedMapCanAssign<declared_key_type,declared_value_type,SKey,SValue>::value,int>::type = 0 00705 ) 00706 : m_bounded_insert(src.m_bounded_insert) 00707 , m_hasher(src.m_hasher) 00708 , m_equal_to(src.m_equal_to) 00709 , m_size(src.m_size) 00710 , m_available_indexes(src.m_available_indexes) 00711 , m_hash_lists(src.m_hash_lists) 00712 , m_next_index(src.m_next_index) 00713 , m_keys(src.m_keys) 00714 , m_values(src.m_values) 00715 , m_scalars(src.m_scalars) 00716 {} 00717 00718 00719 template <typename SKey, typename SValue> 00720 typename Impl::enable_if< Impl::UnorderedMapCanAssign<declared_key_type,declared_value_type,SKey,SValue>::value 00721 ,declared_map_type & >::type 00722 operator=( UnorderedMap<SKey,SValue,Device,Hasher,EqualTo> const& src) 00723 { 00724 m_bounded_insert = src.m_bounded_insert; 00725 m_hasher = src.m_hasher; 00726 m_equal_to = src.m_equal_to; 00727 m_size = src.m_size; 00728 m_available_indexes = src.m_available_indexes; 00729 m_hash_lists = src.m_hash_lists; 00730 m_next_index = src.m_next_index; 00731 m_keys = src.m_keys; 00732 m_values = src.m_values; 00733 m_scalars = src.m_scalars; 00734 return *this; 00735 } 00736 00737 template <typename SKey, typename SValue, typename SDevice> 00738 typename Impl::enable_if< Impl::is_same< typename Impl::remove_const<SKey>::type, key_type>::value && 00739 Impl::is_same< typename Impl::remove_const<SValue>::type, value_type>::value 00740 >::type 00741 create_copy_view( UnorderedMap<SKey, SValue, SDevice, Hasher,EqualTo> const& src) 00742 { 00743 if (m_hash_lists.ptr_on_device() != src.m_hash_lists.ptr_on_device()) { 00744 00745 insertable_map_type tmp; 00746 00747 
tmp.m_bounded_insert = src.m_bounded_insert; 00748 tmp.m_hasher = src.m_hasher; 00749 tmp.m_equal_to = src.m_equal_to; 00750 tmp.m_size = src.size(); 00751 tmp.m_available_indexes = bitset_type( src.capacity() ); 00752 tmp.m_hash_lists = size_type_view( AllocateWithoutInitializing(), "UnorderedMap hash list", src.m_hash_lists.size() ); 00753 tmp.m_next_index = size_type_view( AllocateWithoutInitializing(), "UnorderedMap next index", src.m_next_index.size() ); 00754 tmp.m_keys = key_type_view( AllocateWithoutInitializing(), "UnorderedMap keys", src.m_keys.size() ); 00755 tmp.m_values = value_type_view( AllocateWithoutInitializing(), "UnorderedMap values", src.m_values.size() ); 00756 tmp.m_scalars = scalars_view("UnorderedMap scalars"); 00757 00758 Kokkos::deep_copy(tmp.m_available_indexes, src.m_available_indexes); 00759 00760 typedef Kokkos::Impl::DeepCopy< typename device_type::memory_space, typename SDevice::memory_space > raw_deep_copy; 00761 00762 raw_deep_copy(tmp.m_hash_lists.ptr_on_device(), src.m_hash_lists.ptr_on_device(), sizeof(size_type)*src.m_hash_lists.size()); 00763 raw_deep_copy(tmp.m_next_index.ptr_on_device(), src.m_next_index.ptr_on_device(), sizeof(size_type)*src.m_next_index.size()); 00764 raw_deep_copy(tmp.m_keys.ptr_on_device(), src.m_keys.ptr_on_device(), sizeof(key_type)*src.m_keys.size()); 00765 if (!is_set) { 00766 raw_deep_copy(tmp.m_values.ptr_on_device(), src.m_values.ptr_on_device(), sizeof(impl_value_type)*src.m_values.size()); 00767 } 00768 raw_deep_copy(tmp.m_scalars.ptr_on_device(), src.m_scalars.ptr_on_device(), sizeof(int)*num_scalars ); 00769 00770 *this = tmp; 00771 } 00772 } 00773 00775 private: // private member functions 00776 00777 bool modified() const 00778 { 00779 return get_flag(modified_idx); 00780 } 00781 00782 void set_flag(int flag) const 00783 { 00784 typedef Kokkos::Impl::DeepCopy< typename device_type::memory_space, Kokkos::HostSpace > raw_deep_copy; 00785 const int true_ = true; 00786 
raw_deep_copy(m_scalars.ptr_on_device() + flag, &true_, sizeof(int)); 00787 } 00788 00789 void reset_flag(int flag) const 00790 { 00791 typedef Kokkos::Impl::DeepCopy< typename device_type::memory_space, Kokkos::HostSpace > raw_deep_copy; 00792 const int false_ = false; 00793 raw_deep_copy(m_scalars.ptr_on_device() + flag, &false_, sizeof(int)); 00794 } 00795 00796 bool get_flag(int flag) const 00797 { 00798 typedef Kokkos::Impl::DeepCopy< Kokkos::HostSpace, typename device_type::memory_space > raw_deep_copy; 00799 int result = false; 00800 raw_deep_copy(&result, m_scalars.ptr_on_device() + flag, sizeof(int)); 00801 return result; 00802 } 00803 00804 static uint32_t calculate_capacity(uint32_t capacity_hint) 00805 { 00806 // increase by 16% and round to nears multiple of 128 00807 return capacity_hint ? ((static_cast<uint32_t>(7ull*capacity_hint/6u) + 127u)/128u)*128u : 128u; 00808 } 00809 00810 private: // private members 00811 bool m_bounded_insert; 00812 hasher_type m_hasher; 00813 equal_to_type m_equal_to; 00814 mutable size_type m_size; 00815 bitset_type m_available_indexes; 00816 size_type_view m_hash_lists; 00817 size_type_view m_next_index; 00818 key_type_view m_keys; 00819 value_type_view m_values; 00820 scalars_view m_scalars; 00821 00822 template <typename KKey, typename VValue, typename DDevice, typename HHash, typename EEqualTo> 00823 friend class UnorderedMap; 00824 00825 template <typename UMap> 00826 friend struct Impl::UnorderedMapErase; 00827 00828 template <typename UMap> 00829 friend struct Impl::UnorderedMapHistogram; 00830 00831 template <typename UMap> 00832 friend struct Impl::UnorderedMapPrint; 00833 }; 00834 00835 // Specialization of deep_copy for two UnorderedMap objects. 
00836 template < typename DKey, typename DT, typename DDevice 00837 , typename SKey, typename ST, typename SDevice 00838 , typename Hasher, typename EqualTo > 00839 inline void deep_copy( UnorderedMap<DKey, DT, DDevice, Hasher, EqualTo> & dst 00840 , const UnorderedMap<SKey, ST, SDevice, Hasher, EqualTo> & src ) 00841 { 00842 dst.create_copy_view(src); 00843 } 00844 00845 00846 } // namespace Kokkos 00847 00848 #endif //KOKKOS_UNORDERED_MAP_HPP
// (Documentation extracted by Doxygen 1.7.6.1.)