BLI_linear_allocator.hh
/* SPDX-FileCopyrightText: 2023 Blender Authors
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

/** \file
 * \ingroup bli
 */

#pragma once

#include "BLI_cpp_type.hh"
#include "BLI_string_ref.hh"
#include "BLI_utility_mixins.hh"
#include "BLI_vector.hh"

namespace blender {

/* When defined, the allocator tracks how many bytes callers requested and how many bytes it
 * allocated from the system (see the counters below). */
// #define BLI_DEBUG_LINEAR_ALLOCATOR_SIZE

/**
 * A linear allocator hands out chunks of larger internal buffers in order. This makes individual
 * allocations very cheap, at the cost that they cannot be freed one by one; all memory is
 * released at once when the allocator is destructed.
 */
template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopyable, NonMovable {
 private:
  BLI_NO_UNIQUE_ADDRESS Allocator allocator_;
  Vector<void *, 2> owned_buffers_;

  uintptr_t current_begin_;
  uintptr_t current_end_;

  /* Buffers larger than this are not packed together with smaller allocations, to avoid wasting
   * memory. */
  constexpr static int64_t large_buffer_threshold = 4096;

 public:
#ifdef BLI_DEBUG_LINEAR_ALLOCATOR_SIZE
  int64_t user_requested_size_ = 0;
  int64_t owned_allocation_size_ = 0;
#endif

  LinearAllocator()
  {
    current_begin_ = 0;
    current_end_ = 0;
  }

  ~LinearAllocator()
  {
    for (void *ptr : owned_buffers_) {
      allocator_.deallocate(ptr);
    }
  }

  /**
   * Get a pointer to an uninitialized memory buffer with the given size and alignment. The
   * buffer stays valid until this allocator is destructed.
   */
  void *allocate(const int64_t size, const int64_t alignment)
  {
    BLI_assert(size >= 0);
    BLI_assert(alignment >= 1);
    BLI_assert(is_power_of_2(alignment));

    const uintptr_t alignment_mask = alignment - 1;
    const uintptr_t potential_allocation_begin = (current_begin_ + alignment_mask) &
                                                 ~alignment_mask;
    const uintptr_t potential_allocation_end = potential_allocation_begin + size;
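    /* The mask arithmetic above rounds `current_begin_` up to the next multiple of `alignment`,
     * which works because `alignment` is a power of two. For example, with `current_begin_ = 13`
     * and `alignment = 8`: (13 + 7) & ~7 == 16. */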

    if (potential_allocation_end <= current_end_) {
#ifdef BLI_DEBUG_LINEAR_ALLOCATOR_SIZE
      user_requested_size_ += size;
#endif
      current_begin_ = potential_allocation_end;
      return reinterpret_cast<void *>(potential_allocation_begin);
    }
    if (size <= large_buffer_threshold) {
      this->allocate_new_buffer(size + alignment, alignment);
      return this->allocate(size, alignment);
    }
#ifdef BLI_DEBUG_LINEAR_ALLOCATOR_SIZE
    user_requested_size_ += size;
#endif
    return this->allocator_large_buffer(size, alignment);
  }

  /** Allocate uninitialized memory for a value of type #T. */
  template<typename T> T *allocate()
  {
    return static_cast<T *>(this->allocate(sizeof(T), alignof(T)));
  }

  /** Allocate uninitialized memory for a value of the given run-time type. */
  void *allocate(const CPPType &type)
  {
    return this->allocate(type.size, type.alignment);
  }

  /** Allocate uninitialized memory for an array of #size elements of type #T. */
  template<typename T> MutableSpan<T> allocate_array(const int64_t size)
  {
    T *array = static_cast<T *>(this->allocate(sizeof(T) * size, alignof(T)));
    return MutableSpan<T>(array, size);
  }

  /** Allocate uninitialized memory for an array of #size elements of the given run-time type. */
  void *allocate_array(const CPPType &type, const int64_t size)
  {
    return this->allocate(type.size * size, type.alignment);
  }

  /**
   * Construct an object in memory provided by this allocator. The returned #destruct_ptr runs
   * the destructor, but the memory itself stays owned by the allocator.
   */
  template<typename T, typename... Args> destruct_ptr<T> construct(Args &&...args)
  {
    void *buffer = this->allocate(sizeof(T), alignof(T));
    T *value = new (buffer) T(std::forward<Args>(args)...);
    return destruct_ptr<T>(value);
  }

  /**
   * Construct an array in memory provided by this allocator, passing the same arguments to the
   * constructor of every element.
   */
  template<typename T, typename... Args>
  MutableSpan<T> construct_array(const int64_t size, Args &&...args)
  {
    MutableSpan<T> array = this->allocate_array<T>(size);
    for (const int64_t i : IndexRange(size)) {
      new (&array[i]) T(std::forward<Args>(args)...);
    }
    return array;
  }

  /** Construct a copy of the given array in memory provided by this allocator. */
  template<typename T> MutableSpan<T> construct_array_copy(Span<T> src)
  {
    if (src.is_empty()) {
      return {};
    }
    MutableSpan<T> dst = this->allocate_array<T>(src.size());
    uninitialized_copy_n(src.data(), src.size(), dst.data());
    return dst;
  }

  /** Make a null-terminated copy of the given string in memory provided by this allocator. */
  StringRefNull copy_string(StringRef str)
  {
    const int64_t alloc_size = str.size() + 1;
    char *buffer = static_cast<char *>(this->allocate(alloc_size, 1));
    str.copy_unsafe(buffer);
    return StringRefNull(static_cast<const char *>(buffer));
  }

  /**
   * Allocate one contiguous buffer for #element_amount elements, plus an array of pointers to
   * the beginning of each element.
   */
  MutableSpan<void *> allocate_elements_and_pointer_array(int64_t element_amount,
                                                          int64_t element_size,
                                                          int64_t element_alignment)
  {
    void *pointer_buffer = this->allocate(element_amount * sizeof(void *), alignof(void *));
    void *elements_buffer = this->allocate(element_amount * element_size, element_alignment);

    MutableSpan<void *> pointers(static_cast<void **>(pointer_buffer), element_amount);
    void *next_element_buffer = elements_buffer;
    for (int64_t i : IndexRange(element_amount)) {
      pointers[i] = next_element_buffer;
      next_element_buffer = POINTER_OFFSET(next_element_buffer, element_size);
    }

    return pointers;
  }

  /**
   * Like #allocate_elements_and_pointer_array, but also constructs the elements of type #T with
   * the given arguments.
   */
  template<typename T, typename... Args>
  Span<T *> construct_elements_and_pointer_array(int64_t n, Args &&...args)
  {
    MutableSpan<void *> void_pointers = this->allocate_elements_and_pointer_array(
        n, sizeof(T), alignof(T));
    MutableSpan<T *> pointers = void_pointers.cast<T *>();

    for (int64_t i : IndexRange(n)) {
      new (static_cast<void *>(pointers[i])) T(std::forward<Args>(args)...);
    }

    return pointers;
  }

  /**
   * Tell the allocator to use up the given memory buffer before allocating new memory from the
   * system. May only be called before the allocator owns any buffers.
   */
  void provide_buffer(void *buffer, const int64_t size)
  {
    BLI_assert(owned_buffers_.is_empty());
    current_begin_ = uintptr_t(buffer);
    current_end_ = current_begin_ + size;
  }

  template<size_t Size, size_t Alignment>
  void provide_buffer(AlignedBuffer<Size, Alignment> &aligned_buffer)
  {
    this->provide_buffer(aligned_buffer.ptr(), Size);
  }

  /**
   * Give back the unused tail of the most recent allocation, so that the next allocation can
   * reuse it. #original_allocation_size is the size the allocation was requested with, and
   * #free_after points to the end of the part that is still needed.
   */
  void free_end_of_previous_allocation(const int64_t original_allocation_size,
                                       const void *free_after)
  {
    /* If the original allocation size was large, it might have been allocated separately. In
     * that case, the end of it can't be freed anymore. */
    if (original_allocation_size <= large_buffer_threshold) {
      const uintptr_t new_begin = uintptr_t(free_after);
      BLI_assert(new_begin <= current_begin_);
#ifndef NDEBUG
      /* This condition is not strictly necessary, but it helps to find the cases where memory
       * was actually freed. */
      const int64_t freed_bytes_num = int64_t(current_begin_ - new_begin);
      if (freed_bytes_num > 0) {
        current_begin_ = new_begin;
      }
#else
      current_begin_ = new_begin;
#endif
    }
  }
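
  /* A typical use of #free_end_of_previous_allocation (a sketch; `fill_buffer` is a hypothetical
   * function that returns how many bytes it actually used):
   *   void *buffer = allocator.allocate(max_size, 8);
   *   const int64_t used_size = fill_buffer(buffer);
   *   allocator.free_end_of_previous_allocation(max_size, POINTER_OFFSET(buffer, used_size));
   */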

  /**
   * Move the memory owned by #other into this allocator. Afterwards, #other is reset to its
   * default-constructed state.
   */
  void transfer_ownership_from(LinearAllocator<> &other)
  {
    owned_buffers_.extend(other.owned_buffers_);
#ifdef BLI_DEBUG_LINEAR_ALLOCATOR_SIZE
    user_requested_size_ += other.user_requested_size_;
    owned_allocation_size_ += other.owned_allocation_size_;
#endif
    other.owned_buffers_.clear();
    std::destroy_at(&other);
    new (&other) LinearAllocator<>();
  }

 private:
  void allocate_new_buffer(int64_t min_allocation_size, int64_t min_alignment)
  {
    /* Possibly allocate more bytes than necessary for the current allocation. This way more
     * small allocations can be packed together. Large buffers are allocated exactly, to avoid
     * wasting too much memory. */
    int64_t size_in_bytes = min_allocation_size;
    if (size_in_bytes <= large_buffer_threshold) {
      /* Gradually grow the buffer size with each allocation, up to a maximum. */
      const int grow_size = 1 << std::min<int>(owned_buffers_.size() + 6, 20);
      size_in_bytes = std::min(large_buffer_threshold,
                               std::max<int64_t>(size_in_bytes, grow_size));
    }
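    /* With the numbers above, the first owned buffer is at least 1 << 6 = 64 bytes, the second
     * at least 128, and so on. Once the doubling reaches #large_buffer_threshold, packed buffers
     * stay at 4096 bytes; larger requests bypass this path via #allocator_large_buffer. */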

    void *buffer = this->allocated_owned(size_in_bytes, min_alignment);
    current_begin_ = uintptr_t(buffer);
    current_end_ = current_begin_ + size_in_bytes;
  }

  void *allocator_large_buffer(const int64_t size, const int64_t alignment)
  {
    return this->allocated_owned(size, alignment);
  }

  /* Allocate a buffer from the system and remember it, so that it is freed when this allocator
   * is destructed. */
  void *allocated_owned(const int64_t size, const int64_t alignment)
  {
    void *buffer = allocator_.allocate(size, alignment, __func__);
    owned_buffers_.append(buffer);
#ifdef BLI_DEBUG_LINEAR_ALLOCATOR_SIZE
    owned_allocation_size_ += size;
#endif
    return buffer;
  }
};

} // namespace blender
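
A minimal usage sketch of the class above (hypothetical caller code; `Vertex`, the element counts, and the string are made up, but the member functions are the ones declared in this header):

#include "BLI_linear_allocator.hh"

using namespace blender;

struct Vertex {
  float x, y, z;
};

static void linear_allocator_example()
{
  LinearAllocator<> allocator;

  /* Construct a single value; the #destruct_ptr runs the destructor, while the memory itself is
   * only freed when `allocator` is destructed. */
  destruct_ptr<Vertex> vert = allocator.construct<Vertex>();

  /* Uninitialized storage for 100 elements, packed into the allocator's internal buffers. */
  MutableSpan<Vertex> verts = allocator.allocate_array<Vertex>(100);

  /* A null-terminated copy of a string. */
  StringRefNull name = allocator.copy_string("demo");

  (void)vert;
  (void)verts;
  (void)name;
  /* No individual frees; all owned buffers are released together in ~LinearAllocator(). */
}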