BLI_linear_allocator.hh
/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup bli
 *
 * A `LinearAllocator` hands out memory from a small number of larger buffers. Individual
 * allocations cannot be freed; all memory is released at once when the allocator is destructed.
 */

#pragma once

#include "BLI_string_ref.hh"
#include "BLI_utility_mixins.hh"
#include "BLI_vector.hh"

namespace blender {

/* Define this to track how many bytes were requested by users of the allocator and how many
 * bytes were actually allocated for the owned buffers. */
// #define BLI_DEBUG_LINEAR_ALLOCATOR_SIZE

template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopyable, NonMovable {
 private:
  BLI_NO_UNIQUE_ADDRESS Allocator allocator_;
  Vector<void *, 2> owned_buffers_;

  uintptr_t current_begin_;
  uintptr_t current_end_;

  /* Buffers larger than this threshold are not packed together with smaller allocations, to
   * avoid wasting memory. */
  constexpr static inline int64_t large_buffer_threshold = 4096;

 public:
#ifdef BLI_DEBUG_LINEAR_ALLOCATOR_SIZE
  int64_t user_requested_size_ = 0;
  int64_t owned_allocation_size_ = 0;
#endif

  LinearAllocator()
  {
    current_begin_ = 0;
    current_end_ = 0;
  }

  ~LinearAllocator()
  {
    for (void *ptr : owned_buffers_) {
      allocator_.deallocate(ptr);
    }
  }

  /**
   * Get a pointer to an uninitialized buffer with the given size and alignment. The alignment
   * has to be a power of two. The memory remains valid until the allocator is destructed.
   */
  void *allocate(const int64_t size, const int64_t alignment)
  {
    BLI_assert(size >= 0);
    BLI_assert(alignment >= 1);
    BLI_assert(is_power_of_2(alignment));

    const uintptr_t alignment_mask = alignment - 1;
    const uintptr_t potential_allocation_begin = (current_begin_ + alignment_mask) &
                                                 ~alignment_mask;
    const uintptr_t potential_allocation_end = potential_allocation_begin + size;

    if (potential_allocation_end <= current_end_) {
#ifdef BLI_DEBUG_LINEAR_ALLOCATOR_SIZE
      user_requested_size_ += size;
#endif
      current_begin_ = potential_allocation_end;
      return reinterpret_cast<void *>(potential_allocation_begin);
    }
    if (size <= large_buffer_threshold) {
      this->allocate_new_buffer(size + alignment, alignment);
      return this->allocate(size, alignment);
    }
#ifdef BLI_DEBUG_LINEAR_ALLOCATOR_SIZE
    user_requested_size_ += size;
#endif
    return this->allocator_large_buffer(size, alignment);
  }
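
  /* Worked example of the alignment math above (illustrative, not part of the original header):
   * the mask trick rounds `current_begin_` up to the next multiple of `alignment`. With
   * `current_begin_ = 0x1003` and `alignment = 8`:
   *   alignment_mask     = 0b0111
   *   0x1003 + 0b0111    = 0x100A
   *   0x100A & ~0b0111   = 0x1008   (aligned allocation start)
   * A 16-byte request then advances `current_begin_` to 0x1018. */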

  /**
   * Allocate uninitialized memory for a single value of type T. The destructor is not called
   * automatically; use `construct` if the value should also be constructed and destructed.
   */
  template<typename T> T *allocate()
  {
    return static_cast<T *>(this->allocate(sizeof(T), alignof(T)));
  }

  /**
   * Allocate uninitialized memory for an array with the given size.
   */
  template<typename T> MutableSpan<T> allocate_array(int64_t size)
  {
    T *array = static_cast<T *>(this->allocate(sizeof(T) * size, alignof(T)));
    return MutableSpan<T>(array, size);
  }
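
  /* Usage sketch for the raw allocation methods above (illustrative, not part of the original
   * header); the returned memory is uninitialized and is only reclaimed when the allocator
   * itself is destructed:
   *
   *   LinearAllocator<> allocator;
   *   int *single_value = allocator.allocate<int>();
   *   MutableSpan<float> weights = allocator.allocate_array<float>(64);
   */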

  /**
   * Construct a value of type T in memory provided by this allocator. The returned destruct_ptr
   * calls the destructor when it goes out of scope, but does not free the underlying memory.
   */
  template<typename T, typename... Args> destruct_ptr<T> construct(Args &&...args)
  {
    void *buffer = this->allocate(sizeof(T), alignof(T));
    T *value = new (buffer) T(std::forward<Args>(args)...);
    return destruct_ptr<T>(value);
  }
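
  /* Usage sketch (illustrative): `construct` placement-news the value into allocator-owned
   * memory; `std::string` is only used as an example type here:
   *
   *   LinearAllocator<> allocator;
   *   destruct_ptr<std::string> name = allocator.construct<std::string>("Suzanne");
   *   // `*name` is destructed when `name` goes out of scope; the bytes themselves are
   *   // reclaimed when `allocator` is destructed.
   */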

  /**
   * Construct an array in which every element is initialized with the same constructor
   * arguments.
   */
  template<typename T, typename... Args>
  MutableSpan<T> construct_array(int64_t size, Args &&...args)
  {
    MutableSpan<T> array = this->allocate_array<T>(size);
    for (const int64_t i : IndexRange(size)) {
      new (&array[i]) T(std::forward<Args>(args)...);
    }
    return array;
  }

  /**
   * Construct an array that is a copy of the given span.
   */
  template<typename T> MutableSpan<T> construct_array_copy(Span<T> src)
  {
    if (src.is_empty()) {
      return {};
    }
    MutableSpan<T> dst = this->allocate_array<T>(src.size());
    uninitialized_copy_n(src.data(), src.size(), dst.data());
    return dst;
  }
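
  /* Usage sketch (illustrative): `construct_array` initializes every element with the same
   * constructor arguments, while `construct_array_copy` copy-constructs from an existing span:
   *
   *   LinearAllocator<> allocator;
   *   MutableSpan<int> ones = allocator.construct_array<int>(8, 1);
   *   const int src[3] = {1, 2, 3};
   *   MutableSpan<int> copy = allocator.construct_array_copy(Span<int>(src, 3));
   *
   * Non-trivial destructors are never called by the allocator; for such element types the
   * caller has to destruct the elements before the allocator is destructed. */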

  /**
   * Copy the given string into memory owned by this allocator. The result is null-terminated.
   */
  StringRefNull copy_string(StringRef str)
  {
    const int64_t alloc_size = str.size() + 1;
    char *buffer = static_cast<char *>(this->allocate(alloc_size, 1));
    str.copy(buffer, alloc_size);
    return StringRefNull(static_cast<const char *>(buffer));
  }
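
  /* Usage sketch (illustrative): the copied string lives as long as the allocator, which is
   * useful when the source string is temporary:
   *
   *   LinearAllocator<> allocator;
   *   StringRefNull stored = allocator.copy_string("temporary text");
   */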

  /**
   * Allocate an array of `element_amount` pointers together with a packed buffer of elements,
   * and let every pointer point at its corresponding (still uninitialized) element.
   */
  MutableSpan<void *> allocate_elements_and_pointer_array(int64_t element_amount,
                                                          int64_t element_size,
                                                          int64_t element_alignment)
  {
    void *pointer_buffer = this->allocate(element_amount * sizeof(void *), alignof(void *));
    void *elements_buffer = this->allocate(element_amount * element_size, element_alignment);

    MutableSpan<void *> pointers(static_cast<void **>(pointer_buffer), element_amount);
    void *next_element_buffer = elements_buffer;
    for (int64_t i : IndexRange(element_amount)) {
      pointers[i] = next_element_buffer;
      next_element_buffer = POINTER_OFFSET(next_element_buffer, element_size);
    }

    return pointers;
  }

  /**
   * Same as above, but constructs elements of type T and returns typed pointers.
   */
  template<typename T, typename... Args>
  Span<T *> construct_elements_and_pointer_array(int64_t n, Args &&...args)
  {
    MutableSpan<void *> void_pointers = this->allocate_elements_and_pointer_array(
        n, sizeof(T), alignof(T));
    MutableSpan<T *> pointers = void_pointers.cast<T *>();

    for (int64_t i : IndexRange(n)) {
      new (static_cast<void *>(pointers[i])) T(std::forward<Args>(args)...);
    }

    return pointers;
  }
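
  /* Usage sketch (illustrative): one call allocates the pointer array plus a packed buffer of
   * `n` elements and constructs every element with the same arguments. `MyNode` is a
   * hypothetical example type:
   *
   *   struct MyNode { int value; MyNode(int v) : value(v) {} };
   *   LinearAllocator<> allocator;
   *   Span<MyNode *> nodes = allocator.construct_elements_and_pointer_array<MyNode>(10, 0);
   */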

  /**
   * Tell the allocator to satisfy upcoming allocations from the given buffer first. This has to
   * be called before the allocator has allocated any buffers itself.
   */
  void provide_buffer(void *buffer, const int64_t size)
  {
    BLI_assert(owned_buffers_.is_empty());
    current_begin_ = uintptr_t(buffer);
    current_end_ = current_begin_ + size;
  }

  template<size_t Size, size_t Alignment>
  void provide_buffer(AlignedBuffer<Size, Alignment> &aligned_buffer)
  {
    this->provide_buffer(aligned_buffer.ptr(), Size);
  }
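
  /* Usage sketch (illustrative): serve the first allocations from a stack buffer and only fall
   * back to heap buffers once it is exhausted (`AlignedBuffer` comes from BLI_memory_utils.hh):
   *
   *   AlignedBuffer<256, 8> stack_buffer;
   *   LinearAllocator<> allocator;
   *   allocator.provide_buffer(stack_buffer);
   *   int *value = allocator.allocate<int>();
   */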

  /**
   * Give back the unused tail of the previous allocation. `free_after` has to point into the
   * allocation that was requested with `original_allocation_size`; everything from that address
   * to the end of the allocation may be reused by later allocations.
   */
  void free_end_of_previous_allocation(const int64_t original_allocation_size,
                                       const void *free_after)
  {
    /* If the original allocation size was large, it might have been allocated separately. In
     * that case, we can't free the end of it anymore. */
    if (original_allocation_size <= large_buffer_threshold) {
      const int64_t new_begin = uintptr_t(free_after);
      BLI_assert(new_begin <= current_begin_);
#ifndef NDEBUG
      /* This condition is not really necessary, but it helps find the cases where memory was
       * actually freed. */
      const int64_t freed_bytes_num = current_begin_ - new_begin;
      if (freed_bytes_num > 0) {
        current_begin_ = new_begin;
      }
#else
      current_begin_ = new_begin;
#endif
    }
  }
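
  /* Usage sketch (illustrative): request a conservatively large buffer, then hand the unused
   * tail back so the next allocation can reuse it (`fill_buffer` is a hypothetical helper that
   * returns how many bytes it actually wrote):
   *
   *   LinearAllocator<> allocator;
   *   char *buffer = static_cast<char *>(allocator.allocate(512, 8));
   *   const int64_t used_bytes = fill_buffer(buffer);
   *   allocator.free_end_of_previous_allocation(512, buffer + used_bytes);
   */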

  /**
   * Take ownership of all buffers that `other` allocated so far. Allocations made from those
   * buffers remain valid for as long as this allocator lives. `other` is reset to an empty
   * state afterwards.
   */
  void transfer_ownership_from(LinearAllocator<> &other)
  {
    owned_buffers_.extend(other.owned_buffers_);
#ifdef BLI_DEBUG_LINEAR_ALLOCATOR_SIZE
    user_requested_size_ += other.user_requested_size_;
    owned_allocation_size_ += other.owned_allocation_size_;
#endif
    other.owned_buffers_.clear();
    std::destroy_at(&other);
    new (&other) LinearAllocator<>();
  }
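
  /* Usage sketch (illustrative): merge the buffers of a short-lived allocator into a longer
   * lived one, so that pointers handed out by the short-lived allocator stay valid:
   *
   *   LinearAllocator<> main_allocator;
   *   LinearAllocator<> local_allocator;
   *   int *value = local_allocator.allocate<int>();
   *   main_allocator.transfer_ownership_from(local_allocator);
   *   // `value` now remains valid for as long as `main_allocator` lives.
   */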

 private:
  void allocate_new_buffer(int64_t min_allocation_size, int64_t min_alignment)
  {
    /* Possibly allocate more bytes than necessary for the current allocation. This way more
     * small allocations can be packed together. Large buffers are allocated exactly to avoid
     * wasting too much memory. */
    int64_t size_in_bytes = min_allocation_size;
    if (size_in_bytes <= large_buffer_threshold) {
      /* Gradually grow buffer size with each allocation, up to a maximum. */
      const int grow_size = 1 << std::min<int>(owned_buffers_.size() + 6, 20);
      size_in_bytes = std::min(large_buffer_threshold,
                               std::max<int64_t>(size_in_bytes, grow_size));
    }

    void *buffer = this->allocated_owned(size_in_bytes, min_alignment);
    current_begin_ = uintptr_t(buffer);
    current_end_ = current_begin_ + size_in_bytes;
  }

  void *allocator_large_buffer(const int64_t size, const int64_t alignment)
  {
    return this->allocated_owned(size, alignment);
  }

  void *allocated_owned(const int64_t size, const int64_t alignment)
  {
    void *buffer = allocator_.allocate(size, alignment, __func__);
    owned_buffers_.append(buffer);
#ifdef BLI_DEBUG_LINEAR_ALLOCATOR_SIZE
    owned_allocation_size_ += size;
#endif
    return buffer;
  }
};

}  // namespace blender